8307301: Update HarfBuzz to 7.2.0
Reviewed-by: prr

parent b5a48277ab
commit d8b230c384
@@ -1,8 +1,8 @@
-## Harfbuzz v7.0.1
+## Harfbuzz v7.2.0

 ### Harfbuzz License

-https://github.com/harfbuzz/harfbuzz/blob/7.0.1/COPYING
+https://github.com/harfbuzz/harfbuzz/blob/7.2.0/COPYING

 <pre>

@@ -10,7 +10,7 @@ HarfBuzz is licensed under the so-called "Old MIT" license. Details follow.
 For parts of HarfBuzz that are licensed under different licenses see individual
 files names COPYING in subdirectories where applicable.

-Copyright © 2010-2022  Google, Inc.
+Copyright © 2010-2023  Google, Inc.
 Copyright © 2018-2020  Ebrahim Byagowi
 Copyright © 2004-2013  Red Hat, Inc.
 Copyright © 2019  Facebook, Inc.
@@ -40,7 +40,6 @@
  */
 #define HB_OT_TAG_COLR HB_TAG('C','O','L','R')

-
 namespace OT {
 struct hb_paint_context_t;
 }
@@ -242,10 +241,15 @@ struct Variable
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { value.closurev1 (c); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
-    if (!value.subset (c)) return_trace (false);
+    if (!value.subset (c, instancer, varIdxBase)) return_trace (false);
+    if (c->plan->all_axes_pinned)
+      return_trace (true);
+
+    //TODO: update varIdxBase for partial-instancing
     return_trace (c->serializer->embed (varIdxBase));
   }

@@ -296,10 +300,11 @@ struct NoVariable
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { value.closurev1 (c); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
-    return_trace (value.subset (c));
+    return_trace (value.subset (c, instancer, varIdxBase));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -337,11 +342,20 @@ struct ColorStop
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { c->add_palette_index (paletteIndex); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (*this);
     if (unlikely (!out)) return_trace (false);
+
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->stopOffset.set_float (stopOffset.to_float(instancer (varIdxBase, 0)));
+      out->alpha.set_float (alpha.to_float (instancer (varIdxBase, 1)));
+    }
+
     return_trace (c->serializer->check_assign (out->paletteIndex, c->plan->colr_palettes.get (paletteIndex),
                                                HB_SERIALIZE_ERROR_INT_OVERFLOW));
   }
@@ -390,7 +404,8 @@ struct ColorLine
       stop.closurev1 (c);
   }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->start_embed (this);
@@ -402,7 +417,7 @@ struct ColorLine

     for (const auto& stop : stops.iter ())
     {
-      if (!stop.subset (c)) return_trace (false);
+      if (!stop.subset (c, instancer)) return_trace (false);
     }
     return_trace (true);
   }
@@ -523,6 +538,25 @@ struct Affine2x3
     return_trace (c->check_struct (this));
   }

+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
+  {
+    TRACE_SUBSET (this);
+    auto *out = c->serializer->embed (*this);
+    if (unlikely (!out)) return_trace (false);
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->xx.set_float (xx.to_float(instancer (varIdxBase, 0)));
+      out->yx.set_float (yx.to_float(instancer (varIdxBase, 1)));
+      out->xy.set_float (xy.to_float(instancer (varIdxBase, 2)));
+      out->yy.set_float (yy.to_float(instancer (varIdxBase, 3)));
+      out->dx.set_float (dx.to_float(instancer (varIdxBase, 4)));
+      out->dy.set_float (dy.to_float(instancer (varIdxBase, 5)));
+    }
+    return_trace (true);
+  }
+
   void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
   {
     c->funcs->push_transform (c->data,
@@ -548,7 +582,8 @@ struct PaintColrLayers
 {
   void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer HB_UNUSED) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
@@ -579,11 +614,20 @@ struct PaintSolid
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { c->add_palette_index (paletteIndex); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (*this);
     if (unlikely (!out)) return_trace (false);

+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+      out->alpha.set_float (alpha.to_float (instancer (varIdxBase, 0)));
+
+    if (format == 3 && c->plan->all_axes_pinned)
+      out->format = 2;
+
     return_trace (c->serializer->check_assign (out->paletteIndex, c->plan->colr_palettes.get (paletteIndex),
                                                HB_SERIALIZE_ERROR_INT_OVERFLOW));
   }
@@ -618,13 +662,28 @@ struct PaintLinearGradient
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { (this+colorLine).closurev1 (c); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->colorLine.serialize_subset (c, colorLine, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->x0 = x0 + (int) roundf (instancer (varIdxBase, 0));
+      out->y0 = y0 + (int) roundf (instancer (varIdxBase, 1));
+      out->x1 = x1 + (int) roundf (instancer (varIdxBase, 2));
+      out->y1 = y1 + (int) roundf (instancer (varIdxBase, 3));
+      out->x2 = x2 + (int) roundf (instancer (varIdxBase, 4));
+      out->y2 = y2 + (int) roundf (instancer (varIdxBase, 5));
+    }
+
+    if (format == 5 && c->plan->all_axes_pinned)
+      out->format = 4;
+
+    return_trace (out->colorLine.serialize_subset (c, colorLine, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -669,13 +728,28 @@ struct PaintRadialGradient
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { (this+colorLine).closurev1 (c); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->colorLine.serialize_subset (c, colorLine, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->x0 = x0 + (int) roundf (instancer (varIdxBase, 0));
+      out->y0 = y0 + (int) roundf (instancer (varIdxBase, 1));
+      out->radius0 = radius0 + (unsigned) roundf (instancer (varIdxBase, 2));
+      out->x1 = x1 + (int) roundf (instancer (varIdxBase, 3));
+      out->y1 = y1 + (int) roundf (instancer (varIdxBase, 4));
+      out->radius1 = radius1 + (unsigned) roundf (instancer (varIdxBase, 5));
+    }
+
+    if (format == 7 && c->plan->all_axes_pinned)
+      out->format = 6;
+
+    return_trace (out->colorLine.serialize_subset (c, colorLine, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -720,13 +794,26 @@ struct PaintSweepGradient
   void closurev1 (hb_colrv1_closure_context_t* c) const
   { (this+colorLine).closurev1 (c); }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->colorLine.serialize_subset (c, colorLine, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->centerX = centerX + (int) roundf (instancer (varIdxBase, 0));
+      out->centerY = centerY + (int) roundf (instancer (varIdxBase, 1));
+      out->startAngle.set_float (startAngle.to_float (instancer (varIdxBase, 2)));
+      out->endAngle.set_float (endAngle.to_float (instancer (varIdxBase, 3)));
+    }
+
+    if (format == 9 && c->plan->all_axes_pinned)
+      out->format = 8;
+
+    return_trace (out->colorLine.serialize_subset (c, colorLine, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -746,8 +833,8 @@ struct PaintSweepGradient
     c->funcs->sweep_gradient (c->data, &cl,
                               centerX + c->instancer (varIdxBase, 0),
                               centerY + c->instancer (varIdxBase, 1),
-                              (startAngle.to_float (c->instancer (varIdxBase, 2)) + 1) * (float) M_PI,
-                              (endAngle.to_float (c->instancer (varIdxBase, 3)) + 1) * (float) M_PI);
+                              (startAngle.to_float (c->instancer (varIdxBase, 2)) + 1) * HB_PI,
+                              (endAngle.to_float (c->instancer (varIdxBase, 3)) + 1) * HB_PI);
   }

   HBUINT8 format; /* format = 8(noVar) or 9 (Var) */
@@ -766,7 +853,8 @@ struct PaintGlyph
 {
   void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
@@ -776,7 +864,7 @@ struct PaintGlyph
                                       HB_SERIALIZE_ERROR_INT_OVERFLOW))
       return_trace (false);

-    return_trace (out->paint.serialize_subset (c, paint, this));
+    return_trace (out->paint.serialize_subset (c, paint, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -807,7 +895,8 @@ struct PaintColrGlyph
 {
   void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer HB_UNUSED) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
@@ -836,13 +925,16 @@ struct PaintTransform
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);
-    if (!out->transform.serialize_copy (c->serializer, transform, this)) return_trace (false);
-    return_trace (out->src.serialize_subset (c, src, this));
+    if (!out->transform.serialize_subset (c, transform, this, instancer)) return_trace (false);
+    if (format == 13 && c->plan->all_axes_pinned)
+      out->format = 12;
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -871,13 +963,24 @@ struct PaintTranslate
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->dx = dx + (int) roundf (instancer (varIdxBase, 0));
+      out->dy = dy + (int) roundf (instancer (varIdxBase, 1));
+    }
+
+    if (format == 15 && c->plan->all_axes_pinned)
+      out->format = 14;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -908,13 +1011,24 @@ struct PaintScale
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->scaleX.set_float (scaleX.to_float (instancer (varIdxBase, 0)));
+      out->scaleY.set_float (scaleY.to_float (instancer (varIdxBase, 1)));
+    }
+
+    if (format == 17 && c->plan->all_axes_pinned)
+      out->format = 16;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -945,13 +1059,26 @@ struct PaintScaleAroundCenter
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->scaleX.set_float (scaleX.to_float (instancer (varIdxBase, 0)));
+      out->scaleY.set_float (scaleY.to_float (instancer (varIdxBase, 1)));
+      out->centerX = centerX + (int) roundf (instancer (varIdxBase, 2));
+      out->centerY = centerY + (int) roundf (instancer (varIdxBase, 3));
+    }
+
+    if (format == 19 && c->plan->all_axes_pinned)
+      out->format = 18;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -990,13 +1117,21 @@ struct PaintScaleUniform
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+      out->scale.set_float (scale.to_float (instancer (varIdxBase, 0)));
+
+    if (format == 21 && c->plan->all_axes_pinned)
+      out->format = 20;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1025,13 +1160,25 @@ struct PaintScaleUniformAroundCenter
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->scale.set_float (scale.to_float (instancer (varIdxBase, 0)));
+      out->centerX = centerX + (int) roundf (instancer (varIdxBase, 1));
+      out->centerY = centerY + (int) roundf (instancer (varIdxBase, 2));
+    }
+
+    if (format == 23 && c->plan->all_axes_pinned)
+      out->format = 22;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1068,13 +1215,21 @@ struct PaintRotate
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+      out->angle.set_float (angle.to_float (instancer (varIdxBase, 0)));
+
+    if (format == 25 && c->plan->all_axes_pinned)
+      out->format = 24;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1103,13 +1258,25 @@ struct PaintRotateAroundCenter
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->angle.set_float (angle.to_float (instancer (varIdxBase, 0)));
+      out->centerX = centerX + (int) roundf (instancer (varIdxBase, 1));
+      out->centerY = centerY + (int) roundf (instancer (varIdxBase, 2));
+    }
+
+    if (format == 27 && c->plan->all_axes_pinned)
+      out->format = 26;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1146,13 +1313,24 @@ struct PaintSkew
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->xSkewAngle.set_float (xSkewAngle.to_float (instancer (varIdxBase, 0)));
+      out->ySkewAngle.set_float (ySkewAngle.to_float (instancer (varIdxBase, 1)));
+    }
+
+    if (format == 29 && c->plan->all_axes_pinned)
+      out->format = 28;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1183,13 +1361,26 @@ struct PaintSkewAroundCenter
 {
   HB_INTERNAL void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    return_trace (out->src.serialize_subset (c, src, this));
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->xSkewAngle.set_float (xSkewAngle.to_float (instancer (varIdxBase, 0)));
+      out->ySkewAngle.set_float (ySkewAngle.to_float (instancer (varIdxBase, 1)));
+      out->centerX = centerX + (int) roundf (instancer (varIdxBase, 2));
+      out->centerY = centerY + (int) roundf (instancer (varIdxBase, 3));
+    }
+
+    if (format == 31 && c->plan->all_axes_pinned)
+      out->format = 30;
+
+    return_trace (out->src.serialize_subset (c, src, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1228,14 +1419,15 @@ struct PaintComposite
 {
   void closurev1 (hb_colrv1_closure_context_t* c) const;

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->embed (this);
     if (unlikely (!out)) return_trace (false);

-    if (!out->src.serialize_subset (c, src, this)) return_trace (false);
-    return_trace (out->backdrop.serialize_subset (c, backdrop, this));
+    if (!out->src.serialize_subset (c, src, this, instancer)) return_trace (false);
+    return_trace (out->backdrop.serialize_subset (c, backdrop, this, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c) const
@@ -1283,6 +1475,28 @@ struct ClipBoxFormat1
     clip_box.yMax = yMax;
   }

+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer,
+               uint32_t varIdxBase) const
+  {
+    TRACE_SUBSET (this);
+    auto *out = c->serializer->embed (*this);
+    if (unlikely (!out)) return_trace (false);
+
+    if (instancer && !c->plan->pinned_at_default && varIdxBase != VarIdx::NO_VARIATION)
+    {
+      out->xMin = xMin + (int) roundf (instancer (varIdxBase, 0));
+      out->yMin = yMin + (int) roundf (instancer (varIdxBase, 1));
+      out->xMax = xMax + (int) roundf (instancer (varIdxBase, 2));
+      out->yMax = yMax + (int) roundf (instancer (varIdxBase, 3));
+    }
+
+    if (format == 2 && c->plan->all_axes_pinned)
+      out->format = 1;
+
+    return_trace (true);
+  }
+
   public:
   HBUINT8 format; /* format = 1(noVar) or 2(Var)*/
   FWORD xMin;
@@ -1310,13 +1524,14 @@ struct ClipBoxFormat2 : Variable<ClipBoxFormat1>

 struct ClipBox
 {
-  ClipBox* copy (hb_serialize_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
-    TRACE_SERIALIZE (this);
+    TRACE_SUBSET (this);
     switch (u.format) {
-    case 1: return_trace (reinterpret_cast<ClipBox *> (c->embed (u.format1)));
-    case 2: return_trace (reinterpret_cast<ClipBox *> (c->embed (u.format2)));
-    default:return_trace (nullptr);
+    case 1: return_trace (u.format1.subset (c, instancer, VarIdx::NO_VARIATION));
+    case 2: return_trace (u.format2.subset (c, instancer));
+    default:return_trace (c->default_return_value ());
     }
   }

@@ -1367,13 +1582,15 @@ struct ClipRecord
   int cmp (hb_codepoint_t g) const
   { return g < startGlyphID ? -1 : g <= endGlyphID ? 0 : +1; }

-  ClipRecord* copy (hb_serialize_context_t *c, const void *base) const
+  bool subset (hb_subset_context_t *c,
+               const void *base,
+               const VarStoreInstancer &instancer) const
   {
-    TRACE_SERIALIZE (this);
-    auto *out = c->embed (this);
-    if (unlikely (!out)) return_trace (nullptr);
-    if (!out->clipBox.serialize_copy (c, clipBox, base)) return_trace (nullptr);
-    return_trace (out);
+    TRACE_SUBSET (this);
+    auto *out = c->serializer->embed (*this);
+    if (unlikely (!out)) return_trace (false);
+
+    return_trace (out->clipBox.serialize_subset (c, clipBox, base, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c, const void *base) const
@@ -1400,7 +1617,8 @@ DECLARE_NULL_NAMESPACE_BYTES (OT, ClipRecord);

 struct ClipList
 {
-  unsigned serialize_clip_records (hb_serialize_context_t *c,
+  unsigned serialize_clip_records (hb_subset_context_t *c,
+                                   const VarStoreInstancer &instancer,
                                    const hb_set_t& gids,
                                    const hb_map_t& gid_offset_map) const
   {
@@ -1432,7 +1650,7 @@ struct ClipList
         record.endGlyphID = prev_gid;
         record.clipBox = prev_offset;

-        if (!c->copy (record, this)) return_trace (0);
+        if (!record.subset (c, this, instancer)) return_trace (0);
         count++;

         start_gid = _;
@@ -1446,13 +1664,14 @@ struct ClipList
       record.startGlyphID = start_gid;
       record.endGlyphID = prev_gid;
       record.clipBox = prev_offset;
-      if (!c->copy (record, this)) return_trace (0);
+      if (!record.subset (c, this, instancer)) return_trace (0);
       count++;
     }
     return_trace (count);
   }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->start_embed (*this);
@@ -1477,7 +1696,7 @@ struct ClipList
       }
     }

-    unsigned count = serialize_clip_records (c->serializer, new_gids, new_gid_offset_map);
+    unsigned count = serialize_clip_records (c, instancer, new_gids, new_gid_offset_map);
     if (!count) return_trace (false);
     return_trace (c->serializer->check_assign (out->clips.len, count, HB_SERIALIZE_ERROR_INT_OVERFLOW));
   }
@@ -1611,7 +1830,8 @@ struct BaseGlyphPaintRecord
   { return g < glyphId ? -1 : g > glyphId ? 1 : 0; }

   bool serialize (hb_serialize_context_t *s, const hb_map_t* glyph_map,
-                  const void* src_base, hb_subset_context_t *c) const
+                  const void* src_base, hb_subset_context_t *c,
+                  const VarStoreInstancer &instancer) const
   {
     TRACE_SERIALIZE (this);
     auto *out = s->embed (this);
@@ -1620,7 +1840,7 @@ struct BaseGlyphPaintRecord
                            HB_SERIALIZE_ERROR_INT_OVERFLOW))
       return_trace (false);

-    return_trace (out->paint.serialize_subset (c, paint, src_base));
+    return_trace (out->paint.serialize_subset (c, paint, src_base, instancer));
   }

   bool sanitize (hb_sanitize_context_t *c, const void *base) const
@@ -1639,7 +1859,8 @@ struct BaseGlyphPaintRecord

 struct BaseGlyphList : SortedArray32Of<BaseGlyphPaintRecord>
 {
-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->start_embed (this);
@@ -1651,7 +1872,7 @@ struct BaseGlyphList : SortedArray32Of<BaseGlyphPaintRecord>
       unsigned gid = _.glyphId;
      if (!glyphset->has (gid)) continue;

-      if (_.serialize (c->serializer, c->plan->glyph_map, this, c)) out->len++;
+      if (_.serialize (c->serializer, c->plan->glyph_map, this, c, instancer)) out->len++;
       else return_trace (false);
     }

@@ -1670,7 +1891,8 @@ struct LayerList : Array32OfOffset32To<Paint>
   const Paint& get_paint (unsigned i) const
   { return this+(*this)[i]; }

-  bool subset (hb_subset_context_t *c) const
+  bool subset (hb_subset_context_t *c,
+               const VarStoreInstancer &instancer) const
   {
     TRACE_SUBSET (this);
     auto *out = c->serializer->start_embed (this);
@@ -1681,7 +1903,7 @@ struct LayerList : Array32OfOffset32To<Paint>

     {
       auto *o = out->serialize_append (c->serializer);
-      if (unlikely (!o) || !o->serialize_subset (c, _.second, this))
+      if (unlikely (!o) || !o->serialize_subset (c, _.second, this, instancer))
         return_trace (false);
     }
     return_trace (true);
@@ -1883,7 +2105,6 @@ struct COLR
   bool subset (hb_subset_context_t *c) const
   {
     TRACE_SUBSET (this);
-
     const hb_map_t &reverse_glyph_map = *c->plan->reverse_glyph_map;
     const hb_set_t& glyphset = c->plan->_glyphset_colred;

@@ -1954,7 +2175,12 @@ struct COLR

     auto snap = c->serializer->snapshot ();
     if (!c->serializer->allocate_size<void> (5 * HBUINT32::static_size)) return_trace (false);
-    if (!colr_prime->baseGlyphList.serialize_subset (c, baseGlyphList, this))
+
+    VarStoreInstancer instancer (varStore ? &(this+varStore) : nullptr,
+                                 varIdxMap ? &(this+varIdxMap) : nullptr,
+                                 c->plan->normalized_coords.as_array ());
+
+    if (!colr_prime->baseGlyphList.serialize_subset (c, baseGlyphList, this, instancer))
     {
       if (c->serializer->in_error ()) return_trace (false);
       //no more COLRv1 glyphs: downgrade to version 0
@@ -1964,8 +2190,11 @@ struct COLR

     if (!colr_prime->serialize_V0 (c->serializer, version, base_it, layer_it)) return_trace (false);

-    colr_prime->layerList.serialize_subset (c, layerList, this);
-    colr_prime->clipList.serialize_subset (c, clipList, this);
+    colr_prime->layerList.serialize_subset (c, layerList, this, instancer);
+    colr_prime->clipList.serialize_subset (c, clipList, this, instancer);
+    if (!varStore || c->plan->all_axes_pinned)
+      return_trace (true);
+
     colr_prime->varIdxMap.serialize_copy (c->serializer, varIdxMap, this);
     colr_prime->varStore.serialize_copy (c->serializer, varStore, this);
     return_trace (true);
@@ -1984,14 +2213,15 @@ struct COLR
     return nullptr;
   }

+#ifndef HB_NO_PAINT
   bool
   get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents) const
   {
     if (version != 1)
       return false;

-    VarStoreInstancer instancer (this+varStore,
-                                 this+varIdxMap,
+    VarStoreInstancer instancer (&(this+varStore),
+                                 &(this+varIdxMap),
                                  hb_array (font->coords, font->num_coords));

     if (get_clip (glyph, extents, instancer))
@@ -2022,6 +2252,7 @@ struct COLR

     return ret;
   }
+#endif

   bool
   has_paint_for_glyph (hb_codepoint_t glyph) const
@@ -2045,11 +2276,12 @@ struct COLR
                      instancer);
   }

+#ifndef HB_NO_PAINT
   bool
   paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data, unsigned int palette_index, hb_color_t foreground, bool clip = true) const
   {
-    VarStoreInstancer instancer (this+varStore,
-                                 this+varIdxMap,
+    VarStoreInstancer instancer (&(this+varStore),
+                                 &(this+varIdxMap),
                                  hb_array (font->coords, font->num_coords));
     hb_paint_context_t c (this, funcs, data, font, palette_index, foreground, instancer);

@@ -2060,8 +2292,8 @@ struct COLR
     {
       // COLRv1 glyph

-      VarStoreInstancer instancer (this+varStore,
-                                   this+varIdxMap,
+      VarStoreInstancer instancer (&(this+varStore),
+                                   &(this+varIdxMap),
                                    hb_array (font->coords, font->num_coords));

       bool is_bounded = true;
@@ -2131,6 +2363,7 @@ struct COLR

     return false;
   }
+#endif

   protected:
   HBUINT16 version; /* Table version number (starts at 0). */
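
Note on the COLR changes above: every subset() in this table now threads a VarStoreInstancer through, so that when the subset plan pins all variation axes, each variable Paint can bake its resolved deltas into its static fields and downgrade its format from the Var variant to the non-Var one (format N to N-1). A minimal standalone sketch of that bake-and-downgrade idea follows; the types and names are hypothetical stand-ins, not HarfBuzz API.

    #include <cmath>
    #include <cstdint>
    #include <functional>

    // Hypothetical stand-in for the instancer: resolves a variation index to a
    // concrete delta once the axes are pinned to fixed positions.
    using Instancer = std::function<float (uint32_t var_idx_base, unsigned i)>;

    struct PaintTranslateSketch
    {
      static const uint32_t NO_VARIATION = 0xFFFFFFFFu;

      uint8_t format;   // 14 = PaintTranslate, 15 = PaintVarTranslate
      int16_t dx, dy;

      void instantiate (const Instancer &instancer, uint32_t varIdxBase,
                        bool all_axes_pinned)
      {
        if (instancer && varIdxBase != NO_VARIATION)
        {
          // Bake the resolved deltas into the static translation...
          dx += (int) std::roundf (instancer (varIdxBase, 0));
          dy += (int) std::roundf (instancer (varIdxBase, 1));
        }
        if (format == 15 && all_axes_pinned)
          format = 14;   // ...and drop down to the non-variable paint format.
      }
    };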
@@ -73,6 +73,30 @@ struct CPALV1Tail
   }

   public:
+  void collect_name_ids (const void *base,
+                         unsigned palette_count,
+                         unsigned color_count,
+                         const hb_map_t *color_index_map,
+                         hb_set_t *nameids_to_retain /* OUT */) const
+  {
+    if (paletteLabelsZ)
+    {
+      + (base+paletteLabelsZ).as_array (palette_count)
+      | hb_sink (nameids_to_retain)
+      ;
+    }
+
+    if (colorLabelsZ)
+    {
+      const hb_array_t<const NameID> colorLabels = (base+colorLabelsZ).as_array (color_count);
+      for (unsigned i = 0; i < color_count; i++)
+      {
+        if (!color_index_map->has (i)) continue;
+        nameids_to_retain->add (colorLabels[i]);
+      }
+    }
+  }
+
   bool serialize (hb_serialize_context_t *c,
                   unsigned palette_count,
                   unsigned color_count,
@@ -95,13 +119,10 @@ struct CPALV1Tail
     if (colorLabelsZ)
     {
       c->push ();
-      for (const auto _ : colorLabels)
+      for (unsigned i = 0; i < color_count; i++)
       {
-        const hb_codepoint_t *v;
-        if (!color_index_map->has (_, &v)) continue;
-        NameID new_color_idx;
-        new_color_idx = *v;
-        if (!c->copy<NameID> (new_color_idx))
+        if (!color_index_map->has (i)) continue;
+        if (!c->copy<NameID> (colorLabels[i]))
         {
           c->pop_discard ();
           return_trace (false);
@@ -189,6 +210,13 @@ struct CPAL
     return numColors;
   }

+  void collect_name_ids (const hb_map_t *color_index_map,
+                         hb_set_t *nameids_to_retain /* OUT */) const
+  {
+    if (version == 1)
+      v1 ().collect_name_ids (this, numPalettes, numColors, color_index_map, nameids_to_retain);
+  }
+
   private:
   const CPALV1Tail& v1 () const
   {
@@ -126,15 +126,15 @@ struct MarkLigPosFormat1_2
       return_trace (false);
     }

-    j = (unsigned) c->last_base;
+    unsigned idx = (unsigned) c->last_base;

     /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
-    //if (!_hb_glyph_info_is_ligature (&buffer->info[j])) { return_trace (false); }
+    //if (!_hb_glyph_info_is_ligature (&buffer->info[idx])) { return_trace (false); }

-    unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint);
+    unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[idx].codepoint);
     if (lig_index == NOT_COVERED)
     {
-      buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
+      buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
       return_trace (false);
     }

@@ -145,7 +145,7 @@ struct MarkLigPosFormat1_2
     unsigned int comp_count = lig_attach.rows;
     if (unlikely (!comp_count))
     {
-      buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
+      buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
       return_trace (false);
     }

@@ -154,7 +154,7 @@ struct MarkLigPosFormat1_2
      * can directly use the component index. If not, we attach the mark
      * glyph to the last component of the ligature. */
     unsigned int comp_index;
-    unsigned int lig_id = _hb_glyph_info_get_lig_id (&buffer->info[j]);
+    unsigned int lig_id = _hb_glyph_info_get_lig_id (&buffer->info[idx]);
     unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur());
     unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur());
     if (lig_id && lig_id == mark_id && mark_comp > 0)
@@ -162,7 +162,7 @@ struct MarkLigPosFormat1_2
     else
       comp_index = comp_count - 1;

-    return_trace ((this+markArray).apply (c, mark_index, comp_index, lig_attach, classCount, j));
+    return_trace ((this+markArray).apply (c, mark_index, comp_index, lig_attach, classCount, idx));
   }

   bool subset (hb_subset_context_t *c) const
@@ -55,7 +55,7 @@ struct PairPosFormat1_3

     if (pairSet.len > glyphs->get_population () * hb_bit_storage ((unsigned) pairSet.len) / 4)
     {
-      for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
+      for (hb_codepoint_t g : glyphs->iter())
       {
         unsigned i = cov.get_coverage (g);
         if ((this+pairSet[i]).intersects (glyphs, valueFormat))
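
The PairPos change above only swaps the cursor-style set walk for HarfBuzz's internal iterator object; the behavior is unchanged. In the public C API the cursor idiom remains the available one, e.g.:

    #include <hb.h>

    /* Enumerate an hb_set_t with the public cursor API; the internal C++
     * layer now spells the same loop as: for (hb_codepoint_t g : glyphs->iter ()) */
    static unsigned count_glyphs (const hb_set_t *glyphs)
    {
      unsigned n = 0;
      for (hb_codepoint_t g = HB_SET_VALUE_INVALID; hb_set_next (glyphs, &g);)
        n++;
      return n;
    }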
@@ -28,7 +28,15 @@ struct SinglePosFormat1
     TRACE_SANITIZE (this);
     return_trace (c->check_struct (this) &&
                   coverage.sanitize (c, this) &&
+                  /* The coverage table may use a range to represent a set
+                   * of glyphs, which means a small number of bytes can
+                   * generate a large glyph set. Manually modify the
+                   * sanitizer max ops to take this into account.
+                   *
+                   * Note: This check *must* be right after coverage sanitize. */
+                  c->check_ops ((this + coverage).get_population () >> 1) &&
                   valueFormat.sanitize_value (c, this, values));
+
   }

   bool intersects (const hb_set_t *glyphs) const
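
This sanitize change (mirrored in SingleSubstFormat1_3 further below) charges the sanitizer's operation budget with half the coverage population, since a few bytes of range-encoded coverage can expand to a huge glyph set. A sketch of the budget idea, with hypothetical names:

    // Sketch: each structure visited debits a shared budget; coverage debits
    // extra in proportion to the glyph set it can expand to, so sanitization
    // cost stays bounded by the budget rather than by the decoded set size.
    struct SanitizeBudgetSketch
    {
      long long max_ops = 100000;

      bool check_ops (unsigned count)
      {
        max_ops -= (long long) count;
        return max_ops > 0;   // once exhausted, sanitization fails fast
      }
    };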
@@ -29,6 +29,9 @@ struct Ligature
   bool intersects (const hb_set_t *glyphs) const
   { return hb_all (component, glyphs); }

+  bool intersects_lig_glyph (const hb_set_t *glyphs) const
+  { return glyphs->has(ligGlyph); }
+
   void closure (hb_closure_context_t *c) const
   {
     if (!intersects (c->glyphs)) return;
@@ -34,6 +34,18 @@ struct LigatureSet
     ;
   }

+  bool intersects_lig_glyph (const hb_set_t *glyphs) const
+  {
+    return
+    + hb_iter (ligature)
+    | hb_map (hb_add (this))
+    | hb_map ([glyphs] (const Ligature<Types> &_) {
+      return _.intersects_lig_glyph (glyphs) && _.intersects (glyphs);
+    })
+    | hb_any
+    ;
+  }
+
   void closure (hb_closure_context_t *c) const
   {
     + hb_iter (ligature)
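
intersects_lig_glyph tightens glyph closure for subsetting: a LigatureSet keeps its coverage entry only if some ligature both produces a retained glyph and draws all of its components from retained glyphs (LigatureSubstFormat1_2 switches to this predicate below). The hb_iter pipeline reads roughly like this plain-C++ sketch:

    #include <algorithm>
    #include <set>
    #include <vector>

    struct LigatureSketch
    {
      unsigned lig_glyph;                 // glyph the ligature produces
      std::vector<unsigned> components;   // input component glyphs
    };

    // Keep the set if ANY ligature both outputs a retained glyph and draws
    // all of its components from retained glyphs.
    bool intersects_lig_glyph_sketch (const std::vector<LigatureSketch> &ligatures,
                                      const std::set<unsigned> &glyphs)
    {
      return std::any_of (ligatures.begin (), ligatures.end (),
                          [&] (const LigatureSketch &l)
                          {
                            if (!glyphs.count (l.lig_glyph)) return false;
                            return std::all_of (l.components.begin (), l.components.end (),
                                                [&] (unsigned g) { return glyphs.count (g) > 0; });
                          });
    }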
@@ -130,7 +130,7 @@ struct LigatureSubstFormat1_2
     + hb_zip (this+coverage, hb_iter (ligatureSet) | hb_map (hb_add (this)))
     | hb_filter (glyphset, hb_first)
     | hb_filter ([&] (const LigatureSet<Types>& _) {
-      return _.intersects (&glyphset);
+      return _.intersects_lig_glyph (&glyphset);
     }, hb_second)
     | hb_map (hb_first)
     | hb_sink (new_coverage);
@@ -25,7 +25,15 @@ struct SingleSubstFormat1_3
   bool sanitize (hb_sanitize_context_t *c) const
   {
     TRACE_SANITIZE (this);
-    return_trace (coverage.sanitize (c, this) && deltaGlyphID.sanitize (c));
+    return_trace (c->check_struct (this) &&
+                  coverage.sanitize (c, this) &&
+                  /* The coverage table may use a range to represent a set
+                   * of glyphs, which means a small number of bytes can
+                   * generate a large glyph set. Manually modify the
+                   * sanitizer max ops to take this into account.
+                   *
+                   * Note: This check *must* be right after coverage sanitize. */
+                  c->check_ops ((this + coverage).get_population () >> 1));
   }

   hb_codepoint_t get_mask () const
@@ -87,6 +95,34 @@ struct SingleSubstFormat1_3
   bool would_apply (hb_would_apply_context_t *c) const
   { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }

+  unsigned
+  get_glyph_alternates (hb_codepoint_t  glyph_id,
+                        unsigned        start_offset,
+                        unsigned       *alternate_count  /* IN/OUT.  May be NULL. */,
+                        hb_codepoint_t *alternate_glyphs /* OUT.     May be NULL. */) const
+  {
+    unsigned int index = (this+coverage).get_coverage (glyph_id);
+    if (likely (index == NOT_COVERED))
+    {
+      if (alternate_count)
+        *alternate_count = 0;
+      return 0;
+    }
+
+    if (alternate_count && *alternate_count)
+    {
+      hb_codepoint_t d = deltaGlyphID;
+      hb_codepoint_t mask = get_mask ();
+
+      glyph_id = (glyph_id + d) & mask;
+
+      *alternate_glyphs = glyph_id;
+      *alternate_count = 1;
+    }
+
+    return 1;
+  }
+
   bool apply (hb_ot_apply_context_t *c) const
   {
     TRACE_APPLY (this);
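
With get_glyph_alternates added here (and to SingleSubstFormat2_4 below), a single-substitution lookup can answer the alternates query with its one substitute, here computed as (glyph_id + delta) & mask. A hedged usage sketch through the public API that these methods appear to serve:

    #include <hb.h>
    #include <hb-ot.h>

    /* For a SingleSubst lookup this should now report exactly one alternate:
     * the substituted glyph. */
    static hb_codepoint_t
    first_alternate (hb_face_t *face, unsigned lookup_index, hb_codepoint_t glyph)
    {
      hb_codepoint_t alternate = 0;
      unsigned count = 1;
      hb_ot_layout_lookup_get_glyph_alternates (face, lookup_index, glyph,
                                                0 /* start_offset */,
                                                &count, &alternate);
      return count ? alternate : glyph;
    }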
@@ -75,6 +75,31 @@ struct SingleSubstFormat2_4
   bool would_apply (hb_would_apply_context_t *c) const
   { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }

+  unsigned
+  get_glyph_alternates (hb_codepoint_t  glyph_id,
+                        unsigned        start_offset,
+                        unsigned       *alternate_count  /* IN/OUT.  May be NULL. */,
+                        hb_codepoint_t *alternate_glyphs /* OUT.     May be NULL. */) const
+  {
+    unsigned int index = (this+coverage).get_coverage (glyph_id);
+    if (likely (index == NOT_COVERED))
+    {
+      if (alternate_count)
+        *alternate_count = 0;
+      return 0;
+    }
+
+    if (alternate_count && *alternate_count)
+    {
+      glyph_id = substitute[index];
+
+      *alternate_glyphs = glyph_id;
+      *alternate_count = 1;
+    }
+
+    return 1;
+  }
+
   bool apply (hb_ot_apply_context_t *c) const
   {
     TRACE_APPLY (this);
@@ -87,27 +87,34 @@ struct CompositeGlyphRecord
     }
   }

-  void transform_points (contour_point_vector_t &points) const
+  void transform_points (contour_point_vector_t &points,
+                         const float (&matrix)[4],
+                         const contour_point_t &trans) const
   {
-    float matrix[4];
-    contour_point_t trans;
-    if (get_transformation (matrix, trans))
+    if (scaled_offsets ())
     {
-      if (scaled_offsets ())
-      {
-        points.translate (trans);
-        points.transform (matrix);
-      }
-      else
-      {
-        points.transform (matrix);
-        points.translate (trans);
-      }
+      points.translate (trans);
+      points.transform (matrix);
+    }
+    else
+    {
+      points.transform (matrix);
+      points.translate (trans);
     }
   }

-  unsigned compile_with_deltas (const contour_point_t &p_delta,
-                                char *out) const
+  bool get_points (contour_point_vector_t &points) const
+  {
+    float matrix[4];
+    contour_point_t trans;
+    get_transformation (matrix, trans);
+    if (unlikely (!points.resize (points.length + 1))) return false;
+    points[points.length - 1] = trans;
+    return true;
+  }
+
+  unsigned compile_with_point (const contour_point_t &point,
+                               char *out) const
   {
     const HBINT8 *p = &StructAfter<const HBINT8> (flags);
 #ifndef HB_NO_BEYOND_64K
@@ -121,18 +128,17 @@ struct CompositeGlyphRecord
     unsigned len_before_val = (const char *)p - (const char *)this;
     if (flags & ARG_1_AND_2_ARE_WORDS)
     {
-      // no overflow, copy and update value with deltas
+      // no overflow, copy value
       hb_memcpy (out, this, len);

-      const HBINT16 *px = reinterpret_cast<const HBINT16 *> (p);
       HBINT16 *o = reinterpret_cast<HBINT16 *> (out + len_before_val);
-      o[0] = px[0] + roundf (p_delta.x);
-      o[1] = px[1] + roundf (p_delta.y);
+      o[0] = roundf (point.x);
+      o[1] = roundf (point.y);
     }
     else
     {
-      int new_x = p[0] + roundf (p_delta.x);
-      int new_y = p[1] + roundf (p_delta.y);
+      int new_x = roundf (point.x);
+      int new_y = roundf (point.y);
       if (new_x <= 127 && new_x >= -128 &&
           new_y <= 127 && new_y >= -128)
       {
@@ -143,7 +149,7 @@ struct CompositeGlyphRecord
       }
       else
       {
-        // int8 overflows after deltas applied
+        // new point value has an int8 overflow
        hb_memcpy (out, this, len_before_val);

         //update flags
@@ -171,6 +177,7 @@ struct CompositeGlyphRecord
   bool scaled_offsets () const
   { return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; }

+  public:
   bool get_transformation (float (&matrix)[4], contour_point_t &trans) const
   {
     matrix[0] = matrix[3] = 1.f;
@@ -225,7 +232,6 @@ struct CompositeGlyphRecord
     return tx || ty;
   }

-  public:
   hb_codepoint_t get_gid () const
   {
 #ifndef HB_NO_BEYOND_64K
@@ -246,6 +252,27 @@ struct CompositeGlyphRecord
       StructAfter<HBGlyphID16> (flags) = gid;
   }

+#ifndef HB_NO_BEYOND_64K
+  void lower_gid_24_to_16 ()
+  {
+    hb_codepoint_t gid = get_gid ();
+    if (!(flags & GID_IS_24BIT) || gid > 0xFFFFu)
+      return;
+
+    /* Lower the flag and move the rest of the struct down. */
+
+    unsigned size = get_size ();
+    char *end = (char *) this + size;
+    char *p = &StructAfter<char> (flags);
+    p += HBGlyphID24::static_size;
+
+    flags = flags & ~GID_IS_24BIT;
+    set_gid (gid);
+
+    memmove (p - HBGlyphID24::static_size + HBGlyphID16::static_size, p, end - p);
+  }
+#endif
+
   protected:
   HBUINT16 flags;
   HBUINT24 pad;
@@ -304,7 +331,7 @@ struct CompositeGlyph
   }

   bool compile_bytes_with_deltas (const hb_bytes_t &source_bytes,
-                                  const contour_point_vector_t &deltas,
+                                  const contour_point_vector_t &points_with_deltas,
                                   hb_bytes_t &dest_bytes /* OUT */)
   {
     if (source_bytes.length <= GlyphHeader::static_size ||
@@ -319,7 +346,7 @@ struct CompositeGlyph
     /* try to allocate more memories than source glyph bytes
      * in case that there might be an overflow for int8 value
      * and we would need to use int16 instead */
-    char *o = (char *) hb_calloc (source_len + source_len/2, sizeof (char));
+    char *o = (char *) hb_calloc (source_len * 2, sizeof (char));
     if (unlikely (!o)) return false;

     const CompositeGlyphRecord *c = reinterpret_cast<const CompositeGlyphRecord *> (source_bytes.arrayZ + GlyphHeader::static_size);
@@ -329,8 +356,11 @@ struct CompositeGlyph
     unsigned i = 0, source_comp_len = 0;
     for (const auto &component : it)
     {
-      /* last 4 points in deltas are phantom points and should not be included */
-      if (i >= deltas.length - 4) return false;
+      /* last 4 points in points_with_deltas are phantom points and should not be included */
+      if (i >= points_with_deltas.length - 4) {
+        free (o);
+        return false;
+      }

       unsigned comp_len = component.get_size ();
       if (component.is_anchored ())
@@ -340,7 +370,7 @@ struct CompositeGlyph
       }
       else
       {
-        unsigned new_len = component.compile_with_deltas (deltas[i], p);
+        unsigned new_len = component.compile_with_point (points_with_deltas[i], p);
         p += new_len;
       }
       i++;
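
Two things stand out in the glyf changes above: transform_points no longer reads the component's transformation itself, so a caller can fetch the matrix and offset once via get_transformation and substitute a gvar-instanced translation; and the compile buffer now reserves source_len * 2, since in the worst case every 8-bit argument pair widens to 16-bit. The preserved order-of-operations rule for SCALED_COMPONENT_OFFSET, as a standalone sketch (point type and matrix layout assumed, not the hb types):

    struct Pt { float x, y; };

    // Scaled offsets translate first, then transform; unscaled offsets
    // transform first, then translate. Matrix assumed laid out {xx, yx, xy, yy}.
    static Pt apply_component_transform (Pt p, const float (&m)[4], Pt trans,
                                         bool scaled_offsets)
    {
      auto translate = [&] (Pt q) { return Pt { q.x + trans.x, q.y + trans.y }; };
      auto transform = [&] (Pt q) { return Pt { m[0] * q.x + m[2] * q.y,
                                                m[1] * q.x + m[3] * q.y }; };
      return scaled_offsets ? transform (translate (p))
                            : translate (transform (p));
    }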
@@ -29,7 +29,14 @@ enum phantom_point_index_t

 struct Glyph
 {
-  enum glyph_type_t { EMPTY, SIMPLE, COMPOSITE, VAR_COMPOSITE };
+  enum glyph_type_t {
+    EMPTY,
+    SIMPLE,
+    COMPOSITE,
+#ifndef HB_NO_VAR_COMPOSITES
+    VAR_COMPOSITE,
+#endif
+  };

   public:
   composite_iter_t get_composite_iterator () const
@@ -39,15 +46,23 @@ struct Glyph
   }
   var_composite_iter_t get_var_composite_iterator () const
   {
+#ifndef HB_NO_VAR_COMPOSITES
     if (type != VAR_COMPOSITE) return var_composite_iter_t ();
     return VarCompositeGlyph (*header, bytes).iter ();
+#else
+    return var_composite_iter_t ();
+#endif
   }

   const hb_bytes_t trim_padding () const
   {
     switch (type) {
+#ifndef HB_NO_VAR_COMPOSITES
     case VAR_COMPOSITE: return VarCompositeGlyph (*header, bytes).trim_padding ();
+#endif
     case COMPOSITE: return CompositeGlyph (*header, bytes).trim_padding ();
     case SIMPLE: return SimpleGlyph (*header, bytes).trim_padding ();
+    case EMPTY: return bytes;
     default: return bytes;
     }
   }
@@ -55,27 +70,36 @@ struct Glyph
   void drop_hints ()
   {
     switch (type) {
+#ifndef HB_NO_VAR_COMPOSITES
+    case VAR_COMPOSITE: return; // No hinting
+#endif
     case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints (); return;
     case SIMPLE: SimpleGlyph (*header, bytes).drop_hints (); return;
-    default: return;
+    case EMPTY: return;
     }
   }

   void set_overlaps_flag ()
   {
     switch (type) {
+#ifndef HB_NO_VAR_COMPOSITES
+    case VAR_COMPOSITE: return; // No overlaps flag
+#endif
     case COMPOSITE: CompositeGlyph (*header, bytes).set_overlaps_flag (); return;
     case SIMPLE: SimpleGlyph (*header, bytes).set_overlaps_flag (); return;
-    default: return;
+    case EMPTY: return;
     }
   }

   void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const
   {
     switch (type) {
+#ifndef HB_NO_VAR_COMPOSITES
+    case VAR_COMPOSITE: return; // No hinting
+#endif
     case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints_bytes (dest_start); return;
     case SIMPLE: SimpleGlyph (*header, bytes).drop_hints_bytes (dest_start, dest_end); return;
-    default: return;
+    case EMPTY: return;
     }
   }

@@ -181,7 +205,7 @@ struct Glyph
                             hb_bytes_t &dest_start, /* IN/OUT */
                             hb_bytes_t &dest_end /* OUT */)
   {
-    contour_point_vector_t all_points, deltas;
+    contour_point_vector_t all_points, points_with_deltas;
     unsigned composite_contours = 0;
     head_maxp_info_t *head_maxp_info_p = &plan->head_maxp_info;
     unsigned *composite_contours_p = &composite_contours;
@@ -195,7 +219,7 @@ struct Glyph
       composite_contours_p = nullptr;
     }

-    if (!get_points (font, glyf, all_points, &deltas, head_maxp_info_p, composite_contours_p, false, false))
+    if (!get_points (font, glyf, all_points, &points_with_deltas, head_maxp_info_p, composite_contours_p, false, false))
       return false;

     // .notdef, set type to empty so we only update metrics and don't compile bytes for
@@ -209,11 +233,20 @@ struct Glyph
     }

     //dont compile bytes when pinned at default, just recalculate bounds
-    if (!plan->pinned_at_default) {
-      switch (type) {
+    if (!plan->pinned_at_default)
+    {
+      switch (type)
+      {
+#ifndef HB_NO_VAR_COMPOSITES
+      case VAR_COMPOSITE:
+        // TODO
+        dest_end = hb_bytes_t ();
+        break;
+#endif
+
       case COMPOSITE:
         if (!CompositeGlyph (*header, bytes).compile_bytes_with_deltas (dest_start,
-                                                                        deltas,
+                                                                        points_with_deltas,
                                                                         dest_end))
           return false;
         break;
@@ -223,7 +256,7 @@ struct Glyph
                                                                      dest_end))
           return false;
         break;
-      default:
+      case EMPTY:
         /* set empty bytes for empty glyph
          * do not use source glyph's pointers */
         dest_start = hb_bytes_t ();
@@ -247,7 +280,7 @@ struct Glyph
   template <typename accelerator_t>
   bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator,
                    contour_point_vector_t &all_points /* OUT */,
-                   contour_point_vector_t *deltas = nullptr, /* OUT */
+                   contour_point_vector_t *points_with_deltas = nullptr, /* OUT */
                    head_maxp_info_t * head_maxp_info = nullptr, /* OUT */
                    unsigned *composite_contours = nullptr, /* OUT */
                    bool shift_points_hori = true,
@@ -287,9 +320,8 @@ struct Glyph
       break;
     case COMPOSITE:
     {
-      /* pseudo component points for each component in composite glyph */
-      unsigned num_points = hb_len (CompositeGlyph (*header, bytes).iter ());
-      if (unlikely (!points.resize (num_points))) return false;
+      for (auto &item : get_composite_iterator ())
+        if (unlikely (!item.get_points (points))) return false;
       break;
     }
 #ifndef HB_NO_VAR_COMPOSITES
@@ -299,7 +331,7 @@ struct Glyph
         if (unlikely (!item.get_points (points))) return false;
     }
 #endif
-    default:
+    case EMPTY:
       break;
     }

@@ -327,17 +359,11 @@ struct Glyph
 #endif
                    ;
       phantoms[PHANTOM_LEFT].x = h_delta;
-      phantoms[PHANTOM_RIGHT].x = h_adv + h_delta;
+      phantoms[PHANTOM_RIGHT].x = (int) h_adv + h_delta;
       phantoms[PHANTOM_TOP].y = v_orig;
       phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
     }

-    if (deltas != nullptr && depth == 0 && type == COMPOSITE)
-    {
-      if (unlikely (!deltas->resize (points.length))) return false;
-      deltas->copy_vector (points);
-    }
-
 #ifndef HB_NO_VAR
     glyf_accelerator.gvar->apply_deltas_to_points (gid,
                                                    coords,
@@ -346,13 +372,10 @@ struct Glyph

     // mainly used by CompositeGlyph calculating new X/Y offset value so no need to extend it
     // with child glyphs' points
-    if (deltas != nullptr && depth == 0 && type == COMPOSITE)
+    if (points_with_deltas != nullptr && depth == 0 && type == COMPOSITE)
     {
-      for (unsigned i = 0 ; i < points.length; i++)
-      {
-        deltas->arrayZ[i].x = points.arrayZ[i].x - deltas->arrayZ[i].x;
-        deltas->arrayZ[i].y = points.arrayZ[i].y - deltas->arrayZ[i].y;
-      }
+      if (unlikely (!points_with_deltas->resize (points.length))) return false;
+      points_with_deltas->copy_vector (points);
     }

     switch (type) {
@@ -373,7 +396,7 @@ struct Glyph
             .get_points (font,
                          glyf_accelerator,
                          comp_points,
-                         deltas,
+                         points_with_deltas,
                          head_maxp_info,
                          composite_contours,
                          shift_points_hori,
@@ -389,11 +412,12 @@ struct Glyph
         for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
           phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];

-        /* Apply component transformation & translation */
-        item.transform_points (comp_points);
+        float matrix[4];
+        contour_point_t default_trans;
+        item.get_transformation (matrix, default_trans);

-        /* Apply translation from gvar */
-        comp_points.translate (points[comp_index]);
+        /* Apply component transformation & translation (with deltas applied) */
+        item.transform_points (comp_points, matrix, points[comp_index]);

         if (item.is_anchored ())
         {
@@ -433,7 +457,8 @@ struct Glyph
       hb_array_t<contour_point_t> points_left = points.as_array ();
       for (auto &item : get_var_composite_iterator ())
       {
-        hb_array_t<contour_point_t> record_points = points_left.sub_array (0, item.get_num_points ());
+        unsigned item_num_points = item.get_num_points ();
+        hb_array_t<contour_point_t> record_points = points_left.sub_array (0, item_num_points);

         comp_points.reset ();

@@ -448,7 +473,7 @@ struct Glyph
             .get_points (font,
                          glyf_accelerator,
                          comp_points,
-                         deltas,
+                         points_with_deltas,
                          head_maxp_info,
                          nullptr,
                          shift_points_hori,
@@ -472,12 +497,12 @@ struct Glyph
         if (all_points.length > HB_GLYF_MAX_POINTS)
           return false;

-        points_left += item.get_num_points ();
+        points_left += item_num_points;
       }
       all_points.extend (phantoms);
     } break;
 #endif
-    default:
+    case EMPTY:
       all_points.extend (phantoms);
       break;
     }
@@ -503,6 +528,8 @@ struct Glyph
   }

   hb_bytes_t get_bytes () const { return bytes; }
+  glyph_type_t get_type () const { return type; }
+  const GlyphHeader *get_header () const { return header; }

   Glyph () : bytes (),
              header (bytes.as<GlyphHeader> ()),
@@ -518,7 +545,9 @@ struct Glyph
     int num_contours = header->numberOfContours;
     if (unlikely (num_contours == 0)) type = EMPTY;
     else if (num_contours > 0) type = SIMPLE;
+#ifndef HB_NO_VAR_COMPOSITES
     else if (num_contours == -2) type = VAR_COMPOSITE;
+#endif
     else type = COMPOSITE; /* negative numbers */
   }

@@ -526,7 +555,7 @@ struct Glyph
   hb_bytes_t bytes;
   const GlyphHeader *header;
   hb_codepoint_t gid;
-  unsigned type;
+  glyph_type_t type;
 };
||||
|
||||
|
||||
|
@ -34,6 +34,11 @@ struct SimpleGlyph
unsigned int length (unsigned int instruction_len) const
{ return instruction_len_offset () + 2 + instruction_len; }

bool has_instructions_length () const
{
return instruction_len_offset () + 2 <= bytes.length;
}

unsigned int instructions_length () const
{
unsigned int instruction_length_offset = instruction_len_offset ();
@ -94,6 +99,7 @@ struct SimpleGlyph
/* zero instruction length */
void drop_hints ()
{
if (!has_instructions_length ()) return;
GlyphHeader &glyph_header = const_cast<GlyphHeader &> (header);
(HBUINT16 &) StructAtOffset<HBUINT16> (&glyph_header, instruction_len_offset ()) = 0;
}

@ -18,6 +18,7 @@ struct SubsetGlyph
Glyph source_glyph;
hb_bytes_t dest_start; /* region of source_glyph to copy first */
hb_bytes_t dest_end; /* region of source_glyph to copy second */
bool allocated;

bool serialize (hb_serialize_context_t *c,
bool use_short_loca,
@ -26,7 +27,12 @@ struct SubsetGlyph
TRACE_SERIALIZE (this);

hb_bytes_t dest_glyph = dest_start.copy (c);
dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + dest_end.copy (c).length);
hb_bytes_t end_copy = dest_end.copy (c);
if (!end_copy.arrayZ || !dest_glyph.arrayZ) {
return false;
}

dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + end_copy.length);
unsigned int pad_length = use_short_loca ? padding () : 0;
DEBUG_MSG (SUBSET, nullptr, "serialize %u byte glyph, width %u pad %u", dest_glyph.length, dest_glyph.length + pad_length, pad_length);

@ -40,13 +46,68 @@ struct SubsetGlyph

if (unlikely (!dest_glyph.length)) return_trace (true);

/* update components gids */
/* update components gids. */
for (auto &_ : Glyph (dest_glyph).get_composite_iterator ())
{
hb_codepoint_t new_gid;
if (plan->new_gid_for_old_gid (_.get_gid(), &new_gid))
const_cast<CompositeGlyphRecord &> (_).set_gid (new_gid);
}
#ifndef HB_NO_VAR_COMPOSITES
for (auto &_ : Glyph (dest_glyph).get_var_composite_iterator ())
{
hb_codepoint_t new_gid;
if (plan->new_gid_for_old_gid (_.get_gid(), &new_gid))
const_cast<VarCompositeGlyphRecord &> (_).set_gid (new_gid);
}
#endif

#ifndef HB_NO_BEYOND_64K
auto it = Glyph (dest_glyph).get_composite_iterator ();
if (it)
{
/* lower GID24 to GID16 in components if possible.
*
* TODO: VarComposite. Not as critical, since VarComposite supports
* gid24 from the first version. */
char *p = it ? (char *) &*it : nullptr;
char *q = p;
const char *end = dest_glyph.arrayZ + dest_glyph.length;
while (it)
{
auto &rec = const_cast<CompositeGlyphRecord &> (*it);
++it;

q += rec.get_size ();

rec.lower_gid_24_to_16 ();

unsigned size = rec.get_size ();

memmove (p, &rec, size);

p += size;
}
memmove (p, q, end - q);
p += end - q;

/* We want to shorten the glyph, but we can't do that without
* updating the length in the loca table, which is already
* written out :-(. So we just fill the rest of the glyph with
* harmless instructions, since that's what they will be
* interpreted as.
*
* Should move the lowering to _populate_subset_glyphs() to
* fix this issue. */

hb_memset (p, 0x7A /* TrueType instruction ROFF; harmless */, end - p);
p += end - p;
dest_glyph = hb_bytes_t (dest_glyph.arrayZ, p - (char *) dest_glyph.arrayZ);

// TODO: Padding; & trim serialized bytes.
// TODO: Update length in loca. Ugh.
}
#endif

if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
Glyph (dest_glyph).drop_hints ();
@ -60,12 +121,18 @@ struct SubsetGlyph
bool compile_bytes_with_deltas (const hb_subset_plan_t *plan,
hb_font_t *font,
const glyf_accelerator_t &glyf)
{ return source_glyph.compile_bytes_with_deltas (plan, font, glyf, dest_start, dest_end); }
{
allocated = source_glyph.compile_bytes_with_deltas (plan, font, glyf, dest_start, dest_end);
return allocated;
}

void free_compiled_bytes ()
{
dest_start.fini ();
dest_end.fini ();
if (likely (allocated)) {
allocated = false;
dest_start.fini ();
dest_end.fini ();
}
}

void drop_hints_bytes ()

@ -27,7 +27,7 @@ struct VarCompositeGlyphRecord
HAVE_SKEW_Y = 0x0200,
HAVE_TCENTER_X = 0x0400,
HAVE_TCENTER_Y = 0x0800,
GID_IS_24 = 0x1000,
GID_IS_24BIT = 0x1000,
AXES_HAVE_VARIATION = 0x2000,
RESET_UNSPECIFIED_AXES = 0x4000,
};
@ -43,7 +43,7 @@ struct VarCompositeGlyphRecord

// gid
size += 2;
if (flags & GID_IS_24) size += 1;
if (flags & GID_IS_24BIT) size += 1;

if (flags & HAVE_TRANSLATE_X) size += 2;
if (flags & HAVE_TRANSLATE_Y) size += 2;
@ -65,12 +65,20 @@ struct VarCompositeGlyphRecord

hb_codepoint_t get_gid () const
{
if (flags & GID_IS_24)
if (flags & GID_IS_24BIT)
return StructAfter<const HBGlyphID24> (numAxes);
else
return StructAfter<const HBGlyphID16> (numAxes);
}

void set_gid (hb_codepoint_t gid)
{
if (flags & GID_IS_24BIT)
StructAfter<HBGlyphID24> (numAxes) = gid;
else
StructAfter<HBGlyphID16> (numAxes) = gid;
}

unsigned get_numAxes () const
{
return numAxes;
@ -145,7 +153,7 @@ struct VarCompositeGlyphRecord
float rotation)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L240
rotation = rotation * float (M_PI);
rotation = rotation * HB_PI;
float c = cosf (rotation);
float s = sinf (rotation);
float other[6] = {c, s, -s, c, 0.f, 0.f};
@ -156,8 +164,8 @@ struct VarCompositeGlyphRecord
float skewX, float skewY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L255
skewX = skewX * float (M_PI);
skewY = skewY * float (M_PI);
skewX = skewX * HB_PI;
skewY = skewY * HB_PI;
float other[6] = {1.f, tanf (skewY), tanf (skewX), 1.f, 0.f, 0.f};
transform (matrix, trans, other);
}
@ -174,16 +182,18 @@ struct VarCompositeGlyphRecord
float tCenterX = 0.f;
float tCenterY = 0.f;

if (unlikely (!points.resize (points.length + get_num_points ()))) return false;
unsigned num_points = get_num_points ();

if (unlikely (!points.resize (points.length + num_points))) return false;

unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = numAxes * axis_width;

const F2DOT14 *q = (const F2DOT14 *) (axes_size +
(flags & GID_IS_24 ? 3 : 2) +
(flags & GID_IS_24BIT ? 3 : 2) +
&StructAfter<const HBUINT8> (numAxes));

hb_array_t<contour_point_t> rec_points = points.as_array ().sub_array (points.length - get_num_points ());
hb_array_t<contour_point_t> rec_points = points.as_array ().sub_array (points.length - num_points);

unsigned count = numAxes;
if (flags & AXES_HAVE_VARIATION)
@ -308,8 +318,8 @@ struct VarCompositeGlyphRecord
bool have_variations = flags & AXES_HAVE_VARIATION;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;

const HBUINT8 *p = (const HBUINT8 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24 ? 3 : 2));
const HBUINT16 *q = (const HBUINT16 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24 ? 3 : 2));
const HBUINT8 *p = (const HBUINT8 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));
const HBUINT16 *q = (const HBUINT16 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));

const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + numAxes) : (HBUINT8 *) (q + numAxes)));

@ -344,6 +354,13 @@ struct VarCompositeGlyph
var_composite_iter_t iter () const
{ return var_composite_iter_t (bytes, &StructAfter<VarCompositeGlyphRecord, GlyphHeader> (header)); }

const hb_bytes_t trim_padding () const
{
unsigned length = GlyphHeader::static_size;
for (auto &comp : iter ())
length += comp.get_size ();
return bytes.sub_array (0, length);
}
};

@ -31,6 +31,12 @@ struct glyf

static constexpr hb_tag_t tableTag = HB_OT_TAG_glyf;

static bool has_valid_glyf_format(const hb_face_t* face)
{
const OT::head &head = *face->table.head;
return head.indexToLocFormat <= 1 && head.glyphDataFormat <= 1;
}

bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const
{
TRACE_SANITIZE (this);
@ -72,6 +78,13 @@ struct glyf
{
TRACE_SUBSET (this);

if (!has_valid_glyf_format (c->plan->source)) {
// glyf format is unknown, don't attempt to subset it.
DEBUG_MSG (SUBSET, nullptr,
"unknown glyf format, dropping from subset.");
return_trace (false);
}

glyf *glyf_prime = c->serializer->start_embed <glyf> ();
if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false);

@ -85,11 +98,17 @@ struct glyf
hb_vector_t<unsigned> padded_offsets;
unsigned num_glyphs = c->plan->num_output_glyphs ();
if (unlikely (!padded_offsets.resize (num_glyphs)))
{
hb_font_destroy (font);
return false;
}

hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
if (!_populate_subset_glyphs (c->plan, font, glyphs))
{
hb_font_destroy (font);
return false;
}

if (font)
hb_font_destroy (font);
@ -112,7 +131,7 @@ struct glyf

bool result = glyf_prime->serialize (c->serializer, glyphs.writer (), use_short_loca, c->plan);
if (c->plan->normalized_coords && !c->plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs, glyphs.length - 1);
_free_compiled_subset_glyphs (glyphs);

if (!result) return false;

@ -131,9 +150,9 @@ struct glyf
hb_font_t *
_create_font_for_instancing (const hb_subset_plan_t *plan) const;

void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs, unsigned index) const
void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs) const
{
for (unsigned i = 0; i <= index && i < glyphs.length; i++)
for (unsigned i = 0; i < glyphs.length; i++)
glyphs[i].free_compiled_bytes ();
}

@ -162,7 +181,7 @@ struct glyf_accelerator_t
vmtx = nullptr;
#endif
const OT::head &head = *face->table.head;
if (head.indexToLocFormat > 1 || head.glyphDataFormat > 0)
if (!glyf::has_valid_glyf_format (face))
/* Unknown format. Leave num_glyphs=0, that takes care of disabling us. */
return;
short_offset = 0 == head.indexToLocFormat;
@ -222,6 +241,8 @@ struct glyf_accelerator_t
return true;
}

public:

#ifndef HB_NO_VAR
struct points_aggregator_t
{
@ -285,7 +306,6 @@ struct glyf_accelerator_t
contour_point_t *get_phantoms_sink () { return phantoms; }
};

public:
unsigned
get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const
{
@ -327,6 +347,15 @@ struct glyf_accelerator_t
}
#endif

bool get_leading_bearing_without_var_unscaled (hb_codepoint_t gid, bool is_vertical, int *lsb) const
{
if (unlikely (gid >= num_glyphs)) return false;
if (is_vertical) return false; // TODO Humm, what to do here?

*lsb = glyph_for_gid (gid).get_header ()->xMin;
return true;
}

public:
bool get_extents (hb_font_t *font, hb_codepoint_t gid, hb_glyph_extents_t *extents) const
{
@ -405,7 +434,6 @@ glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
unsigned num_glyphs = plan->num_output_glyphs ();
if (!glyphs.resize (num_glyphs)) return false;

unsigned idx = 0;
for (auto p : plan->glyph_map->iter ())
{
unsigned new_gid = p.second;
@ -433,11 +461,10 @@ glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
if (unlikely (!subset_glyph.compile_bytes_with_deltas (plan, font, glyf)))
{
// when pinned at default, only bounds are updated, thus no need to free
if (!plan->pinned_at_default && idx > 0)
_free_compiled_subset_glyphs (glyphs, idx - 1);
if (!plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs);
return false;
}
idx++;
}
}
return true;
@ -451,7 +478,10 @@ glyf::_create_font_for_instancing (const hb_subset_plan_t *plan) const

hb_vector_t<hb_variation_t> vars;
if (unlikely (!vars.alloc (plan->user_axes_location.get_population (), true)))
{
hb_font_destroy (font);
return nullptr;
}

for (auto _ : plan->user_axes_location)
{

@ -26,13 +26,13 @@ struct path_builder_t

optional_point_t lerp (optional_point_t p, float t)
{ return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); }
} first_oncurve, first_offcurve, last_offcurve, last_offcurve2;
} first_oncurve, first_offcurve, first_offcurve2, last_offcurve, last_offcurve2;

path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_)
{
font = font_;
draw_session = &draw_session_;
first_oncurve = first_offcurve = last_offcurve = last_offcurve2 = optional_point_t ();
first_oncurve = first_offcurve = first_offcurve2 = last_offcurve = last_offcurve2 = optional_point_t ();
}

/* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287
@ -40,7 +40,7 @@ struct path_builder_t
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
* https://stackoverflow.com/a/20772557
*
* Cubic support added (incomplete). */
* Cubic support added. */
void consume_point (const contour_point_t &point)
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
@ -59,7 +59,12 @@ struct path_builder_t
}
else
{
if (first_offcurve)
if (is_cubic && !first_offcurve2)
{
first_offcurve2 = first_offcurve;
first_offcurve = p;
}
else if (first_offcurve)
{
optional_point_t mid = first_offcurve.lerp (p, .5f);
first_oncurve = mid;
@ -126,16 +131,30 @@ struct path_builder_t
{
if (first_offcurve && last_offcurve)
{
optional_point_t mid = last_offcurve.lerp (first_offcurve, .5f);
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
optional_point_t mid = last_offcurve.lerp (first_offcurve2 ?
first_offcurve2 :
first_offcurve, .5f);
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve = optional_point_t ();
/* now check the rest */
}
/* now check the rest */

if (first_offcurve && first_oncurve)
draw_session->quadratic_to (first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
{
if (first_offcurve2)
draw_session->cubic_to (first_offcurve2.x, first_offcurve2.y,
first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
else
draw_session->quadratic_to (first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
}
else if (last_offcurve && first_oncurve)
{
if (last_offcurve2)

@ -81,6 +81,7 @@ STEP 5: REPLACE TABS & REMOVE TRAILING SPACES
To clean up the extra spaces and tabs, run the following script at
each folder level within libharfbuzz.

shopt -s nullglob
for f in *.c *.h *.cc *.hh;
do
# replace tabs with spaces

@ -464,7 +464,8 @@ enum { DELETED_GLYPH = 0xFFFF };
template <typename T>
struct Entry
{
bool sanitize (hb_sanitize_context_t *c, unsigned int count) const
// This doesn't seem like it's ever called.
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
/* Note, we don't recurse-sanitize data because we don't access it.
@ -492,7 +493,8 @@ struct Entry
template <>
struct Entry<void>
{
bool sanitize (hb_sanitize_context_t *c, unsigned int count /*XXX Unused?*/) const
// This doesn't seem like it's ever called.
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));

@ -110,9 +110,10 @@ struct BEInt<Type, 2>
constexpr operator Type () const
{
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap16)))
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
@ -155,9 +156,10 @@ struct BEInt<Type, 4>
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
constexpr operator Type () const {
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap32)))
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
@ -598,13 +600,17 @@ template <typename T>
static inline unsigned int
hb_popcount (T v)
{
#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
#if hb_has_builtin(__builtin_popcount)
if (sizeof (T) <= sizeof (unsigned int))
return __builtin_popcount (v);
#endif

#if hb_has_builtin(__builtin_popcountl)
if (sizeof (T) <= sizeof (unsigned long))
return __builtin_popcountl (v);
#endif

#if hb_has_builtin(__builtin_popcountll)
if (sizeof (T) <= sizeof (unsigned long long))
return __builtin_popcountll (v);
#endif
@ -641,13 +647,17 @@ hb_bit_storage (T v)
{
if (unlikely (!v)) return 0;

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
#if hb_has_builtin(__builtin_clz)
if (sizeof (T) <= sizeof (unsigned int))
return sizeof (unsigned int) * 8 - __builtin_clz (v);
#endif

#if hb_has_builtin(__builtin_clzl)
if (sizeof (T) <= sizeof (unsigned long))
return sizeof (unsigned long) * 8 - __builtin_clzl (v);
#endif

#if hb_has_builtin(__builtin_clzll)
if (sizeof (T) <= sizeof (unsigned long long))
return sizeof (unsigned long long) * 8 - __builtin_clzll (v);
#endif
@ -715,13 +725,17 @@ hb_ctz (T v)
{
if (unlikely (!v)) return 8 * sizeof (T);

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
#if hb_has_builtin(__builtin_ctz)
if (sizeof (T) <= sizeof (unsigned int))
return __builtin_ctz (v);
#endif

#if hb_has_builtin(__builtin_ctzl)
if (sizeof (T) <= sizeof (unsigned long))
return __builtin_ctzl (v);
#endif

#if hb_has_builtin(__builtin_ctzll)
if (sizeof (T) <= sizeof (unsigned long long))
return __builtin_ctzll (v);
#endif
@ -875,8 +889,7 @@ hb_in_ranges (T u, T lo1, T hi1, Ts... ds)
static inline bool
hb_unsigned_mul_overflows (unsigned int count, unsigned int size, unsigned *result = nullptr)
{
/* avoid with xlc16 clang on AIX; it sets the gcc macros */
#if (defined(__GNUC__) && !defined(AIX) && (__GNUC__ >= 4)) || (defined(__clang__) && (__clang_major__ >= 8))
#if hb_has_builtin(__builtin_mul_overflow)
unsigned stack_result;
if (!result)
result = &stack_result;
@ -1331,4 +1344,62 @@ struct
HB_FUNCOBJ (hb_dec);

/* Adapted from kurbo implementation with extra parameters added,
* and finding for a particular range instead of 0.
*
* For documentation and implementation see:
*
* [ITP method]: https://en.wikipedia.org/wiki/ITP_Method
* [An Enhancement of the Bisection Method Average Performance Preserving Minmax Optimality]: https://dl.acm.org/doi/10.1145/3423597
* https://docs.rs/kurbo/0.8.1/kurbo/common/fn.solve_itp.html
* https://github.com/linebender/kurbo/blob/fd839c25ea0c98576c7ce5789305822675a89938/src/common.rs#L162-L248
*/
template <typename func_t>
double solve_itp (func_t f,
double a, double b,
double epsilon,
double min_y, double max_y,
double &ya, double &yb, double &y)
{
unsigned n1_2 = (unsigned) (hb_max (ceil (log2 ((b - a) / epsilon)) - 1.0, 0.0));
const unsigned n0 = 1; // Hardwired
const double k1 = 0.2 / (b - a); // Hardwired.
unsigned nmax = n0 + n1_2;
double scaled_epsilon = epsilon * double (1llu << nmax);
double _2_epsilon = 2.0 * epsilon;
while (b - a > _2_epsilon)
{
double x1_2 = 0.5 * (a + b);
double r = scaled_epsilon - 0.5 * (b - a);
double xf = (yb * a - ya * b) / (yb - ya);
double sigma = x1_2 - xf;
double b_a = b - a;
// This has k2 = 2 hardwired for efficiency.
double b_a_k2 = b_a * b_a;
double delta = k1 * b_a_k2;
int sigma_sign = sigma >= 0 ? +1 : -1;
double xt = delta <= fabs (x1_2 - xf) ? xf + delta * sigma_sign : x1_2;
double xitp = fabs (xt - x1_2) <= r ? xt : x1_2 - r * sigma_sign;
double yitp = f (xitp);
if (yitp > max_y)
{
b = xitp;
yb = yitp;
}
else if (yitp < min_y)
{
a = xitp;
ya = yitp;
}
else
{
y = yitp;
return xitp;
}
scaled_epsilon *= 0.5;
}
return 0.5 * (a + b);
}

#endif /* HB_ALGS_HH */

@ -83,9 +83,15 @@ struct hb_bimap_t

unsigned int get_population () const { return forw_map.get_population (); }

protected:
hb_map_t forw_map;
hb_map_t back_map;

public:
auto keys () const HB_AUTO_RETURN (+ forw_map.keys())
auto values () const HB_AUTO_RETURN (+ forw_map.values())
auto iter () const HB_AUTO_RETURN (+ forw_map.iter())
};

/* Incremental bimap: only lhs is given, rhs is incrementally assigned */
@ -108,6 +114,9 @@ struct hb_inc_bimap_t : hb_bimap_t
hb_codepoint_t skip ()
{ return next_value++; }

hb_codepoint_t skip (unsigned count)
{ return next_value += count; }

hb_codepoint_t get_next_value () const
{ return next_value; }

@ -194,7 +194,7 @@ struct hb_bit_set_t
unsigned int end = major_start (m + 1);
do
{
if (v || page) /* The v check is to optimize out the page check if v is true. */
if (g != INVALID && (v || page)) /* The v check is to optimize out the page check if v is true. */
page->set (g, v);

array = &StructAtOffsetUnaligned<T> (array, stride);
@ -238,7 +238,7 @@ struct hb_bit_set_t
if (g < last_g) return false;
last_g = g;

if (v || page) /* The v check is to optimize out the page check if v is true. */
if (g != INVALID && (v || page)) /* The v check is to optimize out the page check if v is true. */
page->add (g);

array = &StructAtOffsetUnaligned<T> (array, stride);

@ -63,7 +63,7 @@ HB_BEGIN_DECLS
* HarfBuzz and doing that just once (no reuse!),
*
* - If the font is mmap()ed, it's okay to use
* @HB_MEMORY_READONLY_MAY_MAKE_WRITABLE, however, using that mode
* @HB_MEMORY_MODE_READONLY_MAY_MAKE_WRITABLE, however, using that mode
* correctly is very tricky. Use @HB_MEMORY_MODE_READONLY instead.
**/
typedef enum {

@ -40,6 +40,11 @@
* Buffers serve a dual role in HarfBuzz; before shaping, they hold
* the input characters that are passed to hb_shape(), and after
* shaping they hold the output glyphs.
*
* The input buffer is a sequence of Unicode codepoints, with
* associated attributes such as direction and script. The output
* buffer is a sequence of glyphs, with associated attributes such
* as position and cluster.
**/
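The dual role the new paragraph describes is what a typical shaping call looks like; a minimal sketch, not from the patch (`font` is assumed to exist, error handling omitted):

    hb_buffer_t *buf = hb_buffer_create ();
    hb_buffer_add_utf8 (buf, "hello", -1, 0, -1);   /* input: Unicode codepoints */
    hb_buffer_guess_segment_properties (buf);       /* direction, script, language */
    hb_shape (font, buf, nullptr, 0);
    unsigned int len = 0;
    hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buf, &len);     /* output: glyphs */
    hb_glyph_position_t *pos = hb_buffer_get_glyph_positions (buf, &len); /* output: positions */
    hb_buffer_destroy (buf);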

@ -30,7 +30,19 @@
#include "hb.hh"

/* Implements a lockfree cache for int->int functions. */
/* Implements a lockfree cache for int->int functions.
*
* The cache is a fixed-size array of 16-bit or 32-bit integers.
* The key is split into two parts: the cache index and the rest.
*
* The cache index is used to index into the array. The rest is used
* to store the key and the value.
*
* The value is stored in the least significant bits of the integer.
* The key is stored in the most significant bits of the integer.
* The key is shifted by cache_bits to the left to make room for the
* value.
*/
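Illustrative arithmetic only, sketching the packing the comment describes (not the hb_cache_t code itself); assuming key_bits = 16, value_bits = 8 and cache_bits = 8:

    unsigned key = 0x1234u, value = 0x56u;
    unsigned slot  = key & ((1u << 8) - 1);        /* low cache_bits pick the slot      */
    unsigned entry = ((key >> 8) << 8) | value;    /* (key >> cache_bits) << value_bits,
                                                      with the value in the low bits    */
    bool     hit   = (entry >> 8) == (key >> 8);   /* a lookup compares the key tag     */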

template <unsigned int key_bits=16,
unsigned int value_bits=8 + 32 - key_bits,

@ -632,6 +632,7 @@ hb_script_get_horizontal_direction (hb_script_t script)
case HB_SCRIPT_OLD_HUNGARIAN:
case HB_SCRIPT_OLD_ITALIC:
case HB_SCRIPT_RUNIC:
case HB_SCRIPT_TIFINAGH:

return HB_DIRECTION_INVALID;
}
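The visible effect of the added case, as a sketch (hb_script_get_horizontal_direction() is public API):

    /* Tifinagh is now reported as direction-neutral, like Old Italic or Runic: */
    hb_direction_t dir = hb_script_get_horizontal_direction (HB_SCRIPT_TIFINAGH);
    /* dir == HB_DIRECTION_INVALID */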

@ -136,6 +136,10 @@
#define HB_NO_SUBSET_CFF
#endif

#ifdef HB_NO_DRAW
#define HB_NO_OUTLINE
#endif

#ifdef HB_NO_GETENV
#define HB_NO_UNISCRIBE_BUG_COMPATIBLE
#endif

@ -373,6 +373,10 @@ struct hb_no_trace_t {
#define HB_DEBUG_FT (HB_DEBUG+0)
#endif

#ifndef HB_DEBUG_JUSTIFY
#define HB_DEBUG_JUSTIFY (HB_DEBUG+0)
#endif

#ifndef HB_DEBUG_OBJECT
#define HB_DEBUG_OBJECT (HB_DEBUG+0)
#endif

@ -108,6 +108,16 @@ hb_font_funcs_set_glyph_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_func_t func,
void *user_data, hb_destroy_func_t destroy);

/* https://github.com/harfbuzz/harfbuzz/pull/4207 */
/**
* HB_UNICODE_COMBINING_CLASS_CCC133:
*
* [Tibetan]
*
* Deprecated: 7.2.0
**/
#define HB_UNICODE_COMBINING_CLASS_CCC133 133

/**
* hb_unicode_eastasian_width_func_t:
* @ufuncs: A Unicode-functions structure
@ -247,6 +257,7 @@ hb_font_get_glyph_v_kerning (hb_font_t *font,

#endif

HB_END_DECLS

#endif /* HB_DEPRECATED_H */

@ -47,6 +47,12 @@
* More precisely, a font face represents a single face in a binary font file.
* Font faces are typically built from a binary blob and a face index.
* Font faces are used to create fonts.
*
* A font face can be created from a binary blob using hb_face_create().
* The face index is used to select a face from a binary blob that contains
* multiple faces. For example, a binary blob that contains both a regular
* and a bold face can be used to create two font faces, one for each face
* index.
**/
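A minimal sketch of the workflow the new paragraph describes (not from the patch; "fonts.ttc" is a placeholder path):

    hb_blob_t *blob = hb_blob_create_from_file ("fonts.ttc");
    hb_face_t *regular = hb_face_create (blob, 0);   /* first face in the collection   */
    hb_face_t *bold    = hb_face_create (blob, 1);   /* second face in the collection  */
    hb_blob_destroy (blob);                          /* faces hold their own reference */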

@ -197,7 +203,7 @@ _hb_face_for_data_reference_table (hb_face_t *face HB_UNUSED, hb_tag_t tag, void
* a face index into that blob.
*
* The face index is used for blobs of file formats such as TTC and
* and DFont that can contain more than one face. Face indices within
* DFont that can contain more than one face. Face indices within
* such collections are zero-based.
*
* <note>Note: If the blob font format is not a collection, @index

@ -76,7 +76,7 @@ struct hb_face_t
if (unlikely (!reference_table_func))
return hb_blob_get_empty ();

blob = reference_table_func (/*XXX*/const_cast<hb_face_t *> (this), tag, user_data);
blob = reference_table_func (/*Oh, well.*/const_cast<hb_face_t *> (this), tag, user_data);
if (unlikely (!blob))
return hb_blob_get_empty ();

@ -59,6 +59,11 @@
*
* HarfBuzz provides a built-in set of lightweight default
* functions for each method in #hb_font_funcs_t.
*
* The default font functions are implemented in terms of the
* #hb_font_funcs_t methods of the parent font object. This allows
* client programs to override only the methods they need to, and
* otherwise inherit the parent font's implementation, if any.
**/
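A minimal sketch of the inheritance the new paragraph describes (not from the patch; `parent` and `my_nominal_glyph` are hypothetical):

    hb_font_t *sub = hb_font_create_sub_font (parent);
    hb_font_funcs_t *ffuncs = hb_font_funcs_create ();
    hb_font_funcs_set_nominal_glyph_func (ffuncs, my_nominal_glyph, nullptr, nullptr);
    hb_font_set_funcs (sub, ffuncs, nullptr, nullptr); /* other methods fall back to the parent */
    hb_font_funcs_destroy (ffuncs);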

@ -1387,7 +1392,7 @@ hb_font_get_glyph_from_name (hb_font_t *font,
/**
* hb_font_get_glyph_shape:
* @font: #hb_font_t to work upon
* @glyph: : The glyph ID
* @glyph: The glyph ID
* @dfuncs: #hb_draw_funcs_t to draw to
* @draw_data: User data to pass to draw callbacks
*
@ -1409,7 +1414,7 @@ hb_font_get_glyph_shape (hb_font_t *font,
/**
* hb_font_draw_glyph:
* @font: #hb_font_t to work upon
* @glyph: : The glyph ID
* @glyph: The glyph ID
* @dfuncs: #hb_draw_funcs_t to draw to
* @draw_data: User data to pass to draw callbacks
*
@ -2649,6 +2654,79 @@ hb_font_set_variations (hb_font_t *font,
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);
}

/**
* hb_font_set_variation:
* @font: #hb_font_t to work upon
* @tag: The #hb_tag_t tag of the variation-axis name
* @value: The value of the variation axis
*
* Change the value of one variation axis on the font.
*
* Note: This function is expensive when called repeatedly.
* If you want to set multiple variation axes at the same time,
* use hb_font_set_variations() instead.
*
* Since: 7.1.0
*/
void
hb_font_set_variation (hb_font_t *font,
hb_tag_t tag,
float value)
{
if (hb_object_is_immutable (font))
return;

font->serial_coords = ++font->serial;

// TODO Share some of this code with set_variations()

const OT::fvar &fvar = *font->face->table.fvar;
auto axes = fvar.get_axes ();
const unsigned coords_length = axes.length;

int *normalized = coords_length ? (int *) hb_calloc (coords_length, sizeof (int)) : nullptr;
float *design_coords = coords_length ? (float *) hb_calloc (coords_length, sizeof (float)) : nullptr;

if (unlikely (coords_length && !(normalized && design_coords)))
{
hb_free (normalized);
hb_free (design_coords);
return;
}

/* Initialize design coords. */
if (font->design_coords)
{
assert (coords_length == font->num_coords);
for (unsigned int i = 0; i < coords_length; i++)
design_coords[i] = font->design_coords[i];
}
else
{
for (unsigned int i = 0; i < coords_length; i++)
design_coords[i] = axes[i].get_default ();
if (font->instance_index != HB_FONT_NO_VAR_NAMED_INSTANCE)
{
unsigned count = coords_length;
/* This may fail if index is out-of-range;
* That's why we initialize design_coords from fvar above
* unconditionally. */
hb_ot_var_named_instance_get_design_coords (font->face, font->instance_index,
&count, design_coords);
}
}

for (unsigned axis_index = 0; axis_index < coords_length; axis_index++)
if (axes[axis_index].axisTag == tag)
design_coords[axis_index] = value;

font->face->table.avar->map_coords (normalized, coords_length);

hb_ot_var_normalize_coords (font->face, coords_length, design_coords, normalized);
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);

}
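Usage sketch for the new API (not from the patch; `font` is assumed to exist):

    hb_font_set_variation (font, HB_TAG ('w','g','h','t'), 700.0f);  /* one axis */

    /* Equivalent multi-axis form, cheaper when setting several axes at once: */
    hb_variation_t vars[] = { { HB_TAG ('w','g','h','t'), 700.0f },
                              { HB_TAG ('w','d','t','h'),  80.0f } };
    hb_font_set_variations (font, vars, 2);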

/**
* hb_font_set_var_coords_design:
* @font: #hb_font_t to work upon

@ -1150,6 +1150,11 @@ hb_font_set_variations (hb_font_t *font,
const hb_variation_t *variations,
unsigned int variations_length);

HB_EXTERN void
hb_font_set_variation (hb_font_t *font,
hb_tag_t tag,
float value);

HB_EXTERN void
hb_font_set_var_coords_design (hb_font_t *font,
const float *coords,

@ -85,7 +85,7 @@
*/

using hb_ft_advance_cache_t = hb_cache_t<16, 8, 8, false>;
using hb_ft_advance_cache_t = hb_cache_t<16, 24, 8, false>;

struct hb_ft_font_t
{

@ -399,7 +399,7 @@ void
hb_map_keys (const hb_map_t *map,
hb_set_t *keys)
{
map->keys (*keys);
hb_copy (map->keys() , *keys);
}

/**
@ -415,5 +415,5 @@ void
hb_map_values (const hb_map_t *map,
hb_set_t *values)
{
map->values (*values);
hb_copy (map->values() , *values);
}
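The rewritten bodies keep the public behavior, copying every key (or value) into the caller's set; a usage sketch (not from the patch; `map` is assumed to exist):

    hb_set_t *keys = hb_set_create ();
    hb_map_keys (map, keys);     /* keys now holds every key in map */
    hb_set_destroy (keys);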

@ -317,16 +317,6 @@ struct hb_hashmap_t
hb_copy (other, *this);
}

void keys (hb_set_t &keys_) const
{
hb_copy (keys() , keys_);
}

void values (hb_set_t &values_) const
{
hb_copy (values() , values_);
}

/*
* Iterator
*/
@ -353,7 +343,8 @@ struct hb_hashmap_t
)
auto keys () const HB_AUTO_RETURN
(
+ keys_ref ()
+ iter_items ()
| hb_map (&item_t::key)
| hb_map (hb_ridentity)
)
auto values_ref () const HB_AUTO_RETURN
@ -363,7 +354,8 @@ struct hb_hashmap_t
)
auto values () const HB_AUTO_RETURN
(
+ values_ref ()
+ iter_items ()
| hb_map (&item_t::value)
| hb_map (hb_ridentity)
)

@ -404,7 +404,7 @@ struct CmapSubtableFormat4
unsigned distance) const
{
if (k > last) return +1;
if (k < (&last)[distance]) return -1;
if (k < (&last)[distance]/*first*/) return -1;
return 0;
}
HBUINT16 last;
@ -413,7 +413,7 @@ struct CmapSubtableFormat4
const HBUINT16 *found = hb_bsearch (codepoint,
this->endCount,
this->segCount,
2,
sizeof (CustomRange),
_hb_cmp_method<hb_codepoint_t, CustomRange, unsigned>,
this->segCount + 1);
if (unlikely (!found))

@ -93,6 +93,7 @@ HB_OT_ACCELERATOR (OT, cff2)
#ifndef HB_NO_VAR
HB_OT_CORE_TABLE (OT, fvar)
HB_OT_CORE_TABLE (OT, avar)
HB_OT_CORE_TABLE (OT, cvar)
HB_OT_ACCELERATOR (OT, gvar)
HB_OT_CORE_TABLE (OT, MVAR)
#endif

@ -413,7 +413,7 @@ hb_ot_get_glyph_extents (hb_font_t *font,
if (ot_face->sbix->get_extents (font, glyph, extents)) return true;
if (ot_face->CBDT->get_extents (font, glyph, extents)) return true;
#endif
#if !defined(HB_NO_COLOR)
#if !defined(HB_NO_COLOR) && !defined(HB_NO_PAINT)
if (ot_face->COLR->get_extents (font, glyph, extents)) return true;
#endif
if (ot_face->glyf->get_extents (font, glyph, extents)) return true;
@ -633,20 +633,4 @@ hb_ot_font_set_funcs (hb_font_t *font)
_hb_ot_font_destroy);
}

#ifndef HB_NO_VAR
bool
_glyf_get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical,
int *lsb)
{
return font->face->table.glyf->get_leading_bearing_with_var_unscaled (font, glyph, is_vertical, lsb);
}

unsigned
_glyf_get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical)
{
return font->face->table.glyf->get_advance_with_var_unscaled (font, glyph, is_vertical);
}
#endif

#endif

@ -76,7 +76,7 @@ struct DeviceRecord
HBUINT8 maxWidth; /* Maximum width. */
UnsizedArrayOf<HBUINT8> widthsZ; /* Array of widths (numGlyphs is from the 'maxp' table). */
public:
DEFINE_SIZE_ARRAY (2, widthsZ);
DEFINE_SIZE_UNBOUNDED (2);
};

@ -87,14 +87,6 @@ struct hdmx
unsigned int get_size () const
{ return min_size + numRecords * sizeDeviceRecord; }

const DeviceRecord& operator [] (unsigned int i) const
{
/* XXX Null(DeviceRecord) is NOT safe as it's num-glyphs lengthed.
* https://github.com/harfbuzz/harfbuzz/issues/1300 */
if (unlikely (i >= numRecords)) return Null (DeviceRecord);
return StructAtOffset<DeviceRecord> (&this->firstDeviceRecord, i * sizeDeviceRecord);
}

template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c, unsigned version, Iterator it)

@ -63,7 +63,25 @@ struct head
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
return_trace (serialize (c->serializer));
head *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);

if (c->plan->normalized_coords)
{
if (unlikely (!c->serializer->check_assign (out->xMin, c->plan->head_maxp_info.xMin,
HB_SERIALIZE_ERROR_INT_OVERFLOW)))
return_trace (false);
if (unlikely (!c->serializer->check_assign (out->xMax, c->plan->head_maxp_info.xMax,
HB_SERIALIZE_ERROR_INT_OVERFLOW)))
return_trace (false);
if (unlikely (!c->serializer->check_assign (out->yMin, c->plan->head_maxp_info.yMin,
HB_SERIALIZE_ERROR_INT_OVERFLOW)))
return_trace (false);
if (unlikely (!c->serializer->check_assign (out->yMax, c->plan->head_maxp_info.yMax,
HB_SERIALIZE_ERROR_INT_OVERFLOW)))
return_trace (false);
}
return_trace (true);
}

enum mac_style_flag_t {

@ -50,6 +50,9 @@ _glyf_get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t gly
HB_INTERNAL unsigned
_glyf_get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical);

HB_INTERNAL bool
_glyf_get_leading_bearing_without_var_unscaled (hb_face_t *face, hb_codepoint_t gid, bool is_vertical, int *lsb);

namespace OT {

@ -92,7 +95,7 @@ struct hmtxvmtx

unsigned int length;
H *table = (H *) hb_blob_get_data (dest_blob, &length);
table->numberOfLongMetrics = num_hmetrics;
c->serializer->check_assign (table->numberOfLongMetrics, num_hmetrics, HB_SERIALIZE_ERROR_INT_OVERFLOW);

#ifndef HB_NO_VAR
if (c->plan->normalized_coords)
@ -165,12 +168,19 @@ struct hmtxvmtx
lm.sb = _.second;
if (unlikely (!c->embed<LongMetric> (&lm))) return;
}
else
else if (idx < 0x10000u)
{
FWORD *sb = c->allocate_size<FWORD> (FWORD::static_size);
if (unlikely (!sb)) return;
*sb = _.second;
}
else
{
// TODO: This does not do tail optimization.
UFWORD *adv = c->allocate_size<UFWORD> (UFWORD::static_size);
if (unlikely (!adv)) return;
*adv = _.first;
}
idx++;
}
}
@ -189,7 +199,7 @@ struct hmtxvmtx
/* Determine num_long_metrics to encode. */
auto& plan = c->plan;

num_long_metrics = plan->num_output_glyphs ();
num_long_metrics = hb_min (plan->num_output_glyphs (), 0xFFFFu);
unsigned int last_advance = get_new_gid_advance_unscaled (plan, mtx_map, num_long_metrics - 1, _mtx);
while (num_long_metrics > 1 &&
last_advance == get_new_gid_advance_unscaled (plan, mtx_map, num_long_metrics - 2, _mtx))
@ -208,7 +218,8 @@ struct hmtxvmtx
if (!c->plan->old_gid_for_new_gid (_, &old_gid))
return hb_pair (0u, 0);
int lsb = 0;
(void) _mtx.get_leading_bearing_without_var_unscaled (old_gid, &lsb);
if (!_mtx.get_leading_bearing_without_var_unscaled (old_gid, &lsb))
(void) _glyf_get_leading_bearing_without_var_unscaled (c->plan->source, old_gid, !T::is_horizontal, &lsb);
return hb_pair (_mtx.get_advance_without_var_unscaled (old_gid), +lsb);
}
return mtx_map->get (_);
@ -529,6 +529,9 @@ struct FeatureParamsSize
return_trace (true);
}

void collect_name_ids (hb_set_t *nameids_to_retain /* OUT */) const
{ nameids_to_retain->add (subfamilyNameID); }

bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -585,6 +588,9 @@ struct FeatureParamsStylisticSet
return_trace (c->check_struct (this));
}

void collect_name_ids (hb_set_t *nameids_to_retain /* OUT */) const
{ nameids_to_retain->add (uiNameID); }

bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -632,6 +638,20 @@ struct FeatureParamsCharacterVariants
unsigned get_size () const
{ return min_size + characters.len * HBUINT24::static_size; }

void collect_name_ids (hb_set_t *nameids_to_retain /* OUT */) const
{
if (featUILableNameID) nameids_to_retain->add (featUILableNameID);
if (featUITooltipTextNameID) nameids_to_retain->add (featUITooltipTextNameID);
if (sampleTextNameID) nameids_to_retain->add (sampleTextNameID);

if (!firstParamUILabelNameID || !numNamedParameters || numNamedParameters >= 0x7FFF)
return;

unsigned last_name_id = (unsigned) firstParamUILabelNameID + (unsigned) numNamedParameters - 1;
if (last_name_id >= 256 && last_name_id <= 32767)
nameids_to_retain->add_range (firstParamUILabelNameID, last_name_id);
}

bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -694,6 +714,19 @@ struct FeatureParams
return_trace (true);
}

void collect_name_ids (hb_tag_t tag, hb_set_t *nameids_to_retain /* OUT */) const
{
#ifdef HB_NO_LAYOUT_FEATURE_PARAMS
return;
#endif
if (tag == HB_TAG ('s','i','z','e'))
return (u.size.collect_name_ids (nameids_to_retain));
if ((tag & 0xFFFF0000u) == HB_TAG ('s','s','\0','\0')) /* ssXX */
return (u.stylisticSet.collect_name_ids (nameids_to_retain));
if ((tag & 0xFFFF0000u) == HB_TAG ('c','v','\0','\0')) /* cvXX */
return (u.characterVariants.collect_name_ids (nameids_to_retain));
}

bool subset (hb_subset_context_t *c, const Tag* tag) const
{
TRACE_SUBSET (this);
@ -762,6 +795,12 @@ struct Feature
bool intersects_lookup_indexes (const hb_map_t *lookup_indexes) const
{ return lookupIndex.intersects (lookup_indexes); }

void collect_name_ids (hb_tag_t tag, hb_set_t *nameids_to_retain /* OUT */) const
{
if (featureParams)
get_feature_params ().collect_name_ids (tag, nameids_to_retain);
}

bool subset (hb_subset_context_t *c,
hb_subset_layout_context_t *l,
const Tag *tag = nullptr) const
@ -2233,19 +2272,20 @@ struct VarRegionAxis
{
float evaluate (int coord) const
{
int start = startCoord.to_int (), peak = peakCoord.to_int (), end = endCoord.to_int ();
int peak = peakCoord.to_int ();
if (peak == 0 || coord == peak)
return 1.f;

int start = startCoord.to_int (), end = endCoord.to_int ();

/* TODO Move these to sanitize(). */
if (unlikely (start > peak || peak > end))
return 1.;
return 1.f;
if (unlikely (start < 0 && end > 0 && peak != 0))
return 1.;

if (peak == 0 || coord == peak)
return 1.;
return 1.f;

if (coord <= start || end <= coord)
return 0.;
return 0.f;

/* Interpolate */
if (coord < peak)
@ -2462,10 +2502,9 @@ struct VarData
{
for (r = 0; r < src_word_count; r++)
{
for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
for (unsigned old_gid : inner_map.keys())
{
unsigned int old = inner_map.backward (i);
int32_t delta = src->get_item_delta_fast (old, r, src_delta_bytes, src_row_size);
int32_t delta = src->get_item_delta_fast (old_gid, r, src_delta_bytes, src_row_size);
if (delta < -65536 || 65535 < delta)
{
has_long = true;
@ -2482,10 +2521,9 @@ struct VarData
bool short_circuit = src_long_words == has_long && src_word_count <= r;

delta_sz[r] = kZero;
for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
for (unsigned old_gid : inner_map.keys())
{
unsigned int old = inner_map.backward (i);
int32_t delta = src->get_item_delta_fast (old, r, src_delta_bytes, src_row_size);
int32_t delta = src->get_item_delta_fast (old_gid, r, src_delta_bytes, src_row_size);
if (delta < min_threshold || max_threshold < delta)
{
delta_sz[r] = kWord;
@ -2546,8 +2584,8 @@ struct VarData
{
unsigned int region = regionIndices.arrayZ[r];
if (region_indices.has (region)) continue;
for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
if (get_item_delta_fast (inner_map.backward (i), r, delta_bytes, row_size) != 0)
for (hb_codepoint_t old_gid : inner_map.keys())
if (get_item_delta_fast (old_gid, r, delta_bytes, row_size) != 0)
{
region_indices.add (region);
break;

@ -487,7 +487,8 @@ struct hb_ot_apply_context_t :
/* Ignore ZWJ if we are matching context, or asked to. */
matcher.set_ignore_zwj (context_match || c->auto_zwj);
matcher.set_mask (context_match ? -1 : c->lookup_mask);
matcher.set_per_syllable (c->per_syllable);
/* Per syllable matching is only for GSUB. */
matcher.set_per_syllable (c->table_index == 0 && c->per_syllable);
}
void set_lookup_props (unsigned int lookup_props)
{
@ -4461,6 +4462,18 @@ struct GSUBGPOS
}
}

void collect_name_ids (const hb_map_t *feature_index_map,
hb_set_t *nameids_to_retain /* OUT */) const
{
unsigned count = get_feature_count ();
for (unsigned i = 0 ; i < count; i++)
{
if (!feature_index_map->has (i)) continue;
hb_tag_t tag = get_feature_tag (i);
get_feature (i).collect_name_ids (tag, nameids_to_retain);
}
}

template <typename T>
struct accelerator_t
{

@ -64,6 +64,8 @@ using OT::Layout::GPOS;
* @include: hb-ot.h
*
* Functions for querying OpenType Layout features in the font face.
* See the <ulink url="http://www.microsoft.com/typography/otspec/">OpenType
* specification</ulink> for details.
**/

@ -100,7 +100,7 @@ struct maxp
maxp *maxp_prime = c->serializer->embed (this);
if (unlikely (!maxp_prime)) return_trace (false);

maxp_prime->numGlyphs = c->plan->num_output_glyphs ();
maxp_prime->numGlyphs = hb_min (c->plan->num_output_glyphs (), 0xFFFFu);
if (maxp_prime->version.major == 1)
{
const maxpV1Tail *src_v1 = &StructAfter<maxpV1Tail> (*this);

@ -181,6 +181,4 @@ hb_ot_name_get_utf32 (hb_face_t *face,
return hb_ot_name_get_utf<hb_utf32_t> (face, name_id, language, text_size, text);
}

#include "hb-ot-name-language-static.hh"

#endif

@ -99,6 +99,10 @@ struct post
post *post_prime = c->serializer->start_embed<post> ();
if (unlikely (!post_prime)) return_trace (false);

bool glyph_names = c->plan->flags & HB_SUBSET_FLAGS_GLYPH_NAMES;
if (!serialize (c->serializer, glyph_names))
return_trace (false);

#ifndef HB_NO_VAR
if (c->plan->normalized_coords)
{
@ -110,10 +114,6 @@ struct post
}
#endif

bool glyph_names = c->plan->flags & HB_SUBSET_FLAGS_GLYPH_NAMES;
if (!serialize (c->serializer, glyph_names))
return_trace (false);

if (c->plan->user_axes_location.has (HB_TAG ('s','l','n','t')) &&
!c->plan->pinned_at_default)
{

@ -1067,12 +1067,15 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan,
base = i;
while (base < end && is_halant (info[base]))
base++;
info[base].indic_position() = POS_BASE_C;
if (base < end)
info[base].indic_position() = POS_BASE_C;

try_pref = false;
}
break;
}
if (base == end)
break;
}
/* For Malayalam, skip over unformed below- (but NOT post-) forms. */
if (buffer->props.script == HB_SCRIPT_MALAYALAM)

@ -536,6 +536,8 @@ struct STAT
| hb_map (&AxisValue::get_value_name_id)
| hb_sink (nameids_to_retain)
;

nameids_to_retain->add (elidedFallbackNameID);
}

bool subset (hb_subset_context_t *c) const

@ -222,21 +222,371 @@ struct DeltaSetIndexMap

struct VarStoreInstancer
{
VarStoreInstancer (const VariationStore &varStore,
const DeltaSetIndexMap &varIdxMap,
VarStoreInstancer (const VariationStore *varStore,
const DeltaSetIndexMap *varIdxMap,
hb_array_t<int> coords) :
varStore (varStore), varIdxMap (varIdxMap), coords (coords) {}

operator bool () const { return bool (coords); }
operator bool () const { return varStore && bool (coords); }

/* according to the spec, if colr table has varStore but does not have
* varIdxMap, then an implicit identity mapping is used */
float operator() (uint32_t varIdx, unsigned short offset = 0) const
{ return varStore.get_delta (varIdxMap.map (VarIdx::add (varIdx, offset)), coords); }
{ return varStore->get_delta (varIdxMap ? varIdxMap->map (VarIdx::add (varIdx, offset)) : varIdx + offset, coords); }

const VariationStore &varStore;
const DeltaSetIndexMap &varIdxMap;
const VariationStore *varStore;
const DeltaSetIndexMap *varIdxMap;
hb_array_t<int> coords;
};
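A sketch of the pointer-based form (not from the patch; varStore, coords and varIdxBase are assumed to be in scope): a null varIdxMap now means the implicit identity mapping, and a null varStore makes the instancer test false.

    VarStoreInstancer instancer (&varStore, nullptr, coords);
    float dx = instancer ? instancer (varIdxBase, 0) : 0.f;  /* delta for the first value */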
|
||||
|
||||
/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
|
||||
struct TupleVariationHeader
|
||||
{
|
||||
unsigned get_size (unsigned axis_count) const
|
||||
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
                          const hb_array_t<const F2DOT14> shared_tuples,
                          const hb_vector_t<int> *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.f;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        assert (index < shared_tuple_active_idx->length);
        int v = (*shared_tuple_active_idx).arrayZ[index];
        if (v != -1)
        {
          start_idx = v;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    float scalar = 1.f;
    for (unsigned int i = start_idx; i < end_idx; i++)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.f;
        if (v < peak)
        { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
      else
        scalar *= (float) v / peak;
    }
    return scalar;
  }

  bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                 * The low 12 bits are an index into a shared tuple
                                 * records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
  /* Peak tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value.
   *
   * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
  /* Intermediate start tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
  /* Intermediate end tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

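A quick standalone illustration of the packed tupleIndex field and the non-intermediate scalar math used by calculate_scalar() above. This is an editorial sketch, not part of the commit; the packed value and coordinates are made up.

// Editorial sketch: decode a packed tupleIndex and compute a one-axis
// scalar. F2DOT14 is 2.14 fixed-point, so 1.0 == 16384.
#include <cstdio>

int main ()
{
  unsigned tupleIndex = 0xA005u;                    // example packed value
  bool has_peak           = tupleIndex & 0x8000u;   // EmbeddedPeakTuple
  bool has_intermediate   = tupleIndex & 0x4000u;   // IntermediateRegion
  bool has_private_points = tupleIndex & 0x2000u;   // PrivatePointNumbers
  unsigned shared_index   = tupleIndex & 0x0FFFu;   // TupleIndexMask -> 5

  int peak = 16384;                 // axis peak at +1.0
  int v = 8192;                     // current coord at +0.5
  float scalar = (float) v / peak;  // non-intermediate case: 0.5
  printf ("peak=%d interm=%d private=%d index=%u scalar=%g\n",
          (int) has_peak, (int) has_intermediate,
          (int) has_private_points, shared_index, (double) scalar);
  return 0;
}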
struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // Check min_size only here; TupleVariationHeader and the var data are
    // checked while accessing them through the iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount;
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t
  {
    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<TupleVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
      table_base = table_base_;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(table_base+var_data->data);
        const HBUINT8 *p = base;
        if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
                                                                current_tuple->get_size (axis_count)));
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(table_base+var_data->data) + data_offset; }

    private:
    const TupleVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;
    const void *table_base;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

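  // Editorial usage sketch (not upstream code): walking every tuple of a
  // TupleVariationData block with the iterator above. `var_data_bytes`,
  // `axis_count`, and `base` are assumed to come from the caller, as in
  // the cvar table further below.
  //
  //   hb_vector_t<unsigned int> shared_indices;
  //   TupleVariationData::tuple_iterator_t iterator;
  //   if (!TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count,
  //                                                base, shared_indices, &iterator))
  //     return true; /* no applicable tuple variations */
  //   do
  //   {
  //     const HBUINT8 *p = iterator.get_serialized_data ();
  //     unsigned length = iterator.current_tuple->get_data_size ();
  //     /* unpack point numbers and deltas from [p, p + length) */
  //   } while (iterator.move_to_next ());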
  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  const void *table_base,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count, table_base);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }

  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }

  static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<unsigned int> &points /* OUT */,
                             const HBUINT8 *end)
  {
    enum packed_point_flag_t
    {
      POINTS_ARE_WORDS     = 0x80,
      POINT_RUN_COUNT_MASK = 0x7F
    };

    if (unlikely (p + 1 > end)) return false;

    unsigned count = *p++;
    if (count & POINTS_ARE_WORDS)
    {
      if (unlikely (p + 1 > end)) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    if (unlikely (!points.resize (count, false))) return false;

    unsigned n = 0;
    unsigned i = 0;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & POINTS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          n += *(const HBUINT16 *)p;
          points.arrayZ[i] = n;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          n += *p++;
          points.arrayZ[i] = n;
        }
      }
    }
    return true;
  }

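  // Editorial worked example (not in the upstream sources): the packed
  // point-number stream 0x03 0x02 0x01 0x02 0x03 decodes as follows.
  //   0x03            -> count = 3 (high bit clear: one count byte)
  //   0x02            -> control: run of (2 & 0x7F) + 1 = 3 byte-sized deltas
  //   0x01 0x02 0x03  -> cumulative sums n: 1, 3, 6
  // so unpack_points() yields the point indices {1, 3, 6}.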
  static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<int> &deltas /* IN/OUT */,
                             const HBUINT8 *end)
  {
    enum packed_delta_flag_t
    {
      DELTAS_ARE_ZERO      = 0x80,
      DELTAS_ARE_WORDS     = 0x40,
      DELTA_RUN_COUNT_MASK = 0x3F
    };

    unsigned i = 0;
    unsigned count = deltas.length;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & DELTAS_ARE_ZERO)
      {
        for (; i < stop; i++)
          deltas.arrayZ[i] = 0;
      }
      else if (control & DELTAS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT16 *) p;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT8 *) p++;
        }
      }
    }
    return true;
  }

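  // Editorial worked example: for deltas.length == 5, the packed delta
  // stream 0x82 0x01 0x01 0xFF decodes as follows.
  //   0x82      -> DELTAS_ARE_ZERO run of (2 & 0x3F) + 1 = 3: deltas {0, 0, 0}
  //   0x01      -> control: byte-sized run of 2
  //   0x01 0xFF -> deltas {1, -1} (HBINT8 is signed)
  // yielding {0, 0, 0, 1, -1}.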
  bool has_data () const { return tupleVarCount; }

  protected:
  struct TupleVarCount : HBUINT16
  {
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
    unsigned int get_count () const { return (*this) & CountMask; }

    protected:
    enum Flags
    {
      SharedPointNumbers = 0x8000u,
      CountMask          = 0x0FFFu
    };
    public:
    DEFINE_SIZE_STATIC (2);
  };

  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
                                 * low 12 bits are the number of tuple variation tables
                                 * for this glyph. The number of tuple variation tables
                                 * can be any number between 1 and 4095. */
  Offset16To<HBUINT8>
                data;           /* Offset from the start of the base table
                                 * to the serialized data. */
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
  public:
  DEFINE_SIZE_MIN (4);
};

} /* namespace OT */

@ -0,0 +1,158 @@
/*
 * Copyright © 2023  Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_CVAR_TABLE_HH
#define HB_OT_VAR_CVAR_TABLE_HH

#include "hb-ot-var-common.hh"


namespace OT {
/*
 * cvar -- control value table (CVT) Variations
 * https://docs.microsoft.com/en-us/typography/opentype/spec/cvar
 */
#define HB_OT_TAG_cvar HB_TAG('c','v','a','r')

struct cvar
{
  static constexpr hb_tag_t tableTag = HB_OT_TAG_cvar;

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) &&
                  version.sanitize (c) && likely (version.major == 1) &&
                  tupleVariationData.sanitize (c));
  }

  const TupleVariationData* get_tuple_var_data (void) const
  { return &tupleVariationData; }

  static bool calculate_cvt_deltas (unsigned axis_count,
                                    hb_array_t<int> coords,
                                    unsigned num_cvt_item,
                                    const TupleVariationData *tuple_var_data,
                                    const void *base,
                                    hb_vector_t<float>& cvt_deltas /* OUT */)
  {
    if (!coords) return true;
    hb_vector_t<unsigned> shared_indices;
    TupleVariationData::tuple_iterator_t iterator;
    unsigned var_data_length = tuple_var_data->get_size (axis_count);
    hb_bytes_t var_data_bytes = hb_bytes_t (reinterpret_cast<const char*> (tuple_var_data), var_data_length);
    if (!TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count, base,
                                                 shared_indices, &iterator))
      return true; /* isn't applied at all */

    hb_array_t<const F2DOT14> shared_tuples = hb_array<F2DOT14> ();
    hb_vector_t<unsigned> private_indices;
    hb_vector_t<int> unpacked_deltas;

    do
    {
      float scalar = iterator.current_tuple->calculate_scalar (coords, axis_count, shared_tuples);
      if (scalar == 0.f) continue;
      const HBUINT8 *p = iterator.get_serialized_data ();
      unsigned int length = iterator.current_tuple->get_data_size ();
      if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
        return false;

      const HBUINT8 *end = p + length;

      bool has_private_points = iterator.current_tuple->has_private_points ();
      if (has_private_points &&
          !TupleVariationData::unpack_points (p, private_indices, end))
        return false;
      const hb_vector_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;

      bool apply_to_all = (indices.length == 0);
      unsigned num_deltas = apply_to_all ? num_cvt_item : indices.length;
      if (unlikely (!unpacked_deltas.resize (num_deltas, false))) return false;
      if (unlikely (!TupleVariationData::unpack_deltas (p, unpacked_deltas, end))) return false;

      for (unsigned int i = 0; i < num_deltas; i++)
      {
        unsigned int idx = apply_to_all ? i : indices[i];
        if (unlikely (idx >= num_cvt_item)) continue;
        if (scalar != 1.0f) cvt_deltas[idx] += unpacked_deltas[i] * scalar;
        else cvt_deltas[idx] += unpacked_deltas[i];
      }
    } while (iterator.move_to_next ());

    return true;
  }

  static bool add_cvt_and_apply_deltas (hb_subset_plan_t *plan,
                                        const TupleVariationData *tuple_var_data,
                                        const void *base)
  {
    const hb_tag_t cvt = HB_TAG('c','v','t',' ');
    hb_blob_t *cvt_blob = hb_face_reference_table (plan->source, cvt);
    hb_blob_t *cvt_prime_blob = hb_blob_copy_writable_or_fail (cvt_blob);
    hb_blob_destroy (cvt_blob);

    if (unlikely (!cvt_prime_blob))
      return false;

    unsigned cvt_blob_length = hb_blob_get_length (cvt_prime_blob);
    unsigned num_cvt_item = cvt_blob_length / FWORD::static_size;

    hb_vector_t<float> cvt_deltas;
    if (unlikely (!cvt_deltas.resize (num_cvt_item)))
    {
      hb_blob_destroy (cvt_prime_blob);
      return false;
    }
    hb_memset (cvt_deltas.arrayZ, 0, cvt_deltas.get_size ());

    if (!calculate_cvt_deltas (plan->normalized_coords.length, plan->normalized_coords.as_array (),
                               num_cvt_item, tuple_var_data, base, cvt_deltas))
    {
      hb_blob_destroy (cvt_prime_blob);
      return false;
    }

    FWORD *cvt_prime = (FWORD *) hb_blob_get_data_writable (cvt_prime_blob, nullptr);
    for (unsigned i = 0; i < num_cvt_item; i++)
      cvt_prime[i] += (int) roundf (cvt_deltas[i]);

    bool success = plan->add_table (cvt, cvt_prime_blob);
    hb_blob_destroy (cvt_prime_blob);
    return success;
  }

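  // Editorial sketch of the expected call sequence for the helpers above
  // when instancing a pinned font (assumed call site; it mirrors how
  // hb-subset drives cvar but is not quoted from this commit):
  //
  //   const cvar &table = ...;  /* sanitized cvar */
  //   const TupleVariationData *tuple_var_data = table.get_tuple_var_data ();
  //   /* &table doubles as the offset base for the serialized tuple data */
  //   if (!cvar::add_cvt_and_apply_deltas (plan, tuple_var_data, &table))
  //     /* keep or drop the 'cvt ' table unchanged */;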
  protected:
  FixedVersion<> version;                 /* Version of the cvar table;
                                           * initially set to 0x00010000u. */
  TupleVariationData tupleVariationData;  /* TupleVariationData for the cvar table. */
  public:
  DEFINE_SIZE_MIN (8);
};

} /* namespace OT */


#endif /* HB_OT_VAR_CVAR_TABLE_HH */
@ -29,6 +29,7 @@
#define HB_OT_VAR_GVAR_TABLE_HH

#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"

/*
 * gvar -- Glyph Variation Table
@ -90,311 +91,8 @@ struct contour_point_vector_t : hb_vector_t<contour_point_t>
  }
};

/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
                          const hb_array_t<const F2DOT14> shared_tuples) const
  {
    hb_array_t<const F2DOT14> peak_tuple;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count);
    else
    {
      unsigned int index = get_index ();
      if (unlikely (index * coord_count >= shared_tuples.length))
        return 0.f;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count);
    }

    hb_array_t<const F2DOT14> start_tuple;
    hb_array_t<const F2DOT14> end_tuple;
    if (has_intermediate ())
    {
      start_tuple = get_start_tuple (coord_count);
      end_tuple = get_end_tuple (coord_count);
    }

    float scalar = 1.f;
    for (unsigned int i = 0; i < coord_count; i++)
    {
      int v = coords[i];
      int peak = peak_tuple[i].to_int ();
      if (!peak || v == peak) continue;

      if (has_intermediate ())
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.f;
        if (v < peak)
        { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
      else
        scalar *= (float) v / peak;
    }
    return scalar;
  }

  bool has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                 * The low 12 bits are an index into a shared tuple
                                 * records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
  /* Peak tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value.
   *
   * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
  /* Intermediate start tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
  /* Intermediate end tuple record for this tuple variation table — optional,
   * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

struct GlyphVariationData
{
  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t
  {
    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<GlyphVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(var_data+var_data->data);
        const HBUINT8 *p = base;
        if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
                                                                current_tuple->get_size (axis_count)));
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(var_data+var_data->data) + data_offset; }

    private:
    const GlyphVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }

  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }

  static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<unsigned int> &points /* OUT */,
                             const HBUINT8 *end)
  {
    enum packed_point_flag_t
    {
      POINTS_ARE_WORDS     = 0x80,
      POINT_RUN_COUNT_MASK = 0x7F
    };

    if (unlikely (p + 1 > end)) return false;

    unsigned count = *p++;
    if (count & POINTS_ARE_WORDS)
    {
      if (unlikely (p + 1 > end)) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    if (unlikely (!points.resize (count, false))) return false;

    unsigned n = 0;
    unsigned i = 0;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      if (unlikely (i + run_count > count)) return false;
      unsigned j;
      if (control & POINTS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (j = 0; j < run_count; j++, i++)
        {
          n += *(const HBUINT16 *)p;
          points.arrayZ[i] = n;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (j = 0; j < run_count; j++, i++)
        {
          n += *p++;
          points.arrayZ[i] = n;
        }
      }
    }
    return true;
  }

  static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<int> &deltas /* IN/OUT */,
                             const HBUINT8 *end)
  {
    enum packed_delta_flag_t
    {
      DELTAS_ARE_ZERO      = 0x80,
      DELTAS_ARE_WORDS     = 0x40,
      DELTA_RUN_COUNT_MASK = 0x3F
    };

    unsigned i = 0;
    unsigned count = deltas.length;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      if (unlikely (i + run_count > count)) return false;
      unsigned j;
      if (control & DELTAS_ARE_ZERO)
      {
        for (j = 0; j < run_count; j++, i++)
          deltas.arrayZ[i] = 0;
      }
      else if (control & DELTAS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (j = 0; j < run_count; j++, i++)
        {
          deltas.arrayZ[i] = * (const HBINT16 *) p;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (j = 0; j < run_count; j++, i++)
        {
          deltas.arrayZ[i] = * (const HBINT8 *) p++;
        }
      }
    }
    return true;
  }

  bool has_data () const { return tupleVarCount; }

  protected:
  struct TupleVarCount : HBUINT16
  {
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
    unsigned int get_count () const { return (*this) & CountMask; }

    protected:
    enum Flags
    {
      SharedPointNumbers = 0x8000u,
      CountMask          = 0x0FFFu
    };
    public:
    DEFINE_SIZE_STATIC (2);
  };

  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
                                 * low 12 bits are the number of tuple variation tables
                                 * for this glyph. The number of tuple variation tables
                                 * can be any number between 1 and 4095. */
  Offset16To<HBUINT8>
                data;           /* Offset from the start of the GlyphVariationData table
                                 * to the serialized data. */
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
  public:
  DEFINE_SIZE_MIN (4);
};
struct GlyphVariationData : TupleVariationData
{};

struct gvar
{
@ -406,8 +104,8 @@ struct gvar
    return_trace (c->check_struct (this) && (version.major == 1) &&
                  sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
                  (is_long_offset () ?
                   c->check_array (get_long_offset_array (), glyphCount+1) :
                   c->check_array (get_short_offset_array (), glyphCount+1)));
                   c->check_array (get_long_offset_array (), c->get_num_glyphs () + 1) :
                   c->check_array (get_short_offset_array (), c->get_num_glyphs () + 1)));
  }

  /* GlyphVariationData not sanitized here; must be checked while accessing each glyph variation data */
@ -418,6 +116,8 @@ struct gvar
  {
    TRACE_SUBSET (this);

    unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;

    gvar *out = c->serializer->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

@ -427,7 +127,7 @@ struct gvar
    out->sharedTupleCount = sharedTupleCount;

    unsigned int num_glyphs = c->plan->num_output_glyphs ();
    out->glyphCount = num_glyphs;
    out->glyphCountX = hb_min (0xFFFFu, num_glyphs);

    unsigned int subset_data_size = 0;
    for (hb_codepoint_t gid = (c->plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE) ? 0 : 1;
@ -436,7 +136,7 @@ struct gvar
    {
      hb_codepoint_t old_gid;
      if (!c->plan->old_gid_for_new_gid (gid, &old_gid)) continue;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, old_gid).length;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, glyph_count, old_gid).length;
    }

    bool long_offset = subset_data_size & ~0xFFFFu;
@ -468,7 +168,9 @@ struct gvar
    {
      hb_codepoint_t old_gid;
      hb_bytes_t var_data_bytes = c->plan->old_gid_for_new_gid (gid, &old_gid)
                                  ? get_glyph_var_data_bytes (c->source_blob, old_gid)
                                  ? get_glyph_var_data_bytes (c->source_blob,
                                                              glyph_count,
                                                              old_gid)
                                  : hb_bytes_t ();

      if (long_offset)
@ -490,10 +192,12 @@ struct gvar
  }

  protected:
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob, hb_codepoint_t glyph) const
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob,
                                             unsigned glyph_count,
                                             hb_codepoint_t glyph) const
  {
    unsigned start_offset = get_offset (glyph);
    unsigned end_offset = get_offset (glyph+1);
    unsigned start_offset = get_offset (glyph_count, glyph);
    unsigned end_offset = get_offset (glyph_count, glyph+1);
    if (unlikely (end_offset < start_offset)) return hb_bytes_t ();
    unsigned length = end_offset - start_offset;
    hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
@ -502,9 +206,9 @@ struct gvar

  bool is_long_offset () const { return flags & 1; }

  unsigned get_offset (unsigned i) const
  unsigned get_offset (unsigned glyph_count, unsigned i) const
  {
    if (unlikely (i > glyphCount)) return 0;
    if (unlikely (i > glyph_count)) return 0;
    _hb_compiler_memory_r_barrier ();
    return is_long_offset () ? get_long_offset_array ()[i] : get_short_offset_array ()[i] * 2;
  }
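  // Editorial worked example for get_offset(): with bit 0 of `flags` clear,
  // offsets are uint16 values stored divided by two, hence the `* 2` above.
  // A stored short-offset array {0, 10, 10, 24} yields the byte ranges
  //   glyph 0: [0, 20), glyph 1: [20, 20) (no variation data), glyph 2: [20, 48).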
@ -516,7 +220,38 @@ struct gvar
  struct accelerator_t
  {
    accelerator_t (hb_face_t *face)
    { table = hb_sanitize_context_t ().reference_table<gvar> (face); }
    {
      table = hb_sanitize_context_t ().reference_table<gvar> (face);
      /* If sanitize failed, set glyphCount to 0. */
      glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;

      /* For shared tuples that only have one axis active, share the index of
       * that axis as a cache. This will speed up calculate_scalar() a lot
       * for fonts with lots of axes and many "monovar" tuples. */
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
      unsigned count = table->sharedTupleCount;
      if (unlikely (!shared_tuple_active_idx.resize (count, false))) return;
      unsigned axis_count = table->axisCount;
      for (unsigned i = 0; i < count; i++)
      {
        hb_array_t<const F2DOT14> tuple = shared_tuples.sub_array (axis_count * i, axis_count);
        int idx = -1;
        for (unsigned j = 0; j < axis_count; j++)
        {
          F2DOT14 peak = tuple.arrayZ[j];
          if (peak.to_int () != 0)
          {
            if (idx != -1)
            {
              idx = -1;
              break;
            }
            idx = j;
          }
        }
        shared_tuple_active_idx[i] = idx;
      }
    }
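    // Editorial note on the cache built above: shared_tuple_active_idx[i] == j
    // means shared tuple i has a nonzero peak on axis j only, so
    // calculate_scalar() can loop over [j, j + 1) instead of all axisCount
    // entries; -1 means "zero or several active axes; scan them all".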
    ~accelerator_t () { table.destroy (); }

    private:
@ -554,30 +289,26 @@ struct gvar
    {
      if (!coords) return true;

      if (unlikely (glyph >= table->glyphCount)) return true;
      if (unlikely (glyph >= glyphCount)) return true;

      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyph);
      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
      if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
      hb_vector_t<unsigned int> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
                                                   var_data_bytes.arrayZ,
                                                   shared_indices, &iterator))
        return true; /* so isn't applied at all */

      /* Save original points for inferred delta calculation */
      contour_point_vector_t orig_points_vec;
      orig_points_vec.extend (points);
      if (unlikely (orig_points_vec.in_error ())) return false;
      contour_point_vector_t orig_points_vec; // Populated lazily
      auto orig_points = orig_points_vec.as_array ();

      contour_point_vector_t deltas_vec; /* flag is used to indicate referenced point */
      if (unlikely (!deltas_vec.resize (points.length, false))) return false;
      /* flag is used to indicate referenced point */
      contour_point_vector_t deltas_vec; // Populated lazily
      auto deltas = deltas_vec.as_array ();

      hb_vector_t<unsigned> end_points;
      for (unsigned i = 0; i < points.length; ++i)
        if (points.arrayZ[i].is_end_point)
          end_points.push (i);
      hb_vector_t<unsigned> end_points; // Populated lazily

      unsigned num_coords = table->axisCount;
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
@ -585,15 +316,23 @@ struct gvar
      hb_vector_t<unsigned int> private_indices;
      hb_vector_t<int> x_deltas;
      hb_vector_t<int> y_deltas;
      bool flush = false;
      do
      {
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples);
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples,
                                                                 shared_tuple_active_idx.in_error () ? nullptr : &shared_tuple_active_idx);
        if (scalar == 0.f) continue;
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        if (!deltas)
        {
          if (unlikely (!deltas_vec.resize (points.length))) return false;
          deltas = deltas_vec.as_array ();
        }

        const HBUINT8 *end = p + length;

        bool has_private_points = iterator.current_tuple->has_private_points ();
@ -609,16 +348,37 @@ struct gvar
        if (unlikely (!y_deltas.resize (num_deltas, false))) return false;
        if (unlikely (!GlyphVariationData::unpack_deltas (p, y_deltas, end))) return false;

        hb_memset (deltas.arrayZ, 0, deltas.get_size ());
        if (!apply_to_all)
        {
          if (!orig_points)
          {
            orig_points_vec.extend (points);
            if (unlikely (orig_points_vec.in_error ())) return false;
            orig_points = orig_points_vec.as_array ();
          }

          if (flush)
          {
            for (unsigned int i = 0; i < points.length; i++)
              points.arrayZ[i].translate (deltas.arrayZ[i]);
            flush = false;

          }
          hb_memset (deltas.arrayZ, 0, deltas.get_size ());
        }

        unsigned ref_points = 0;
        if (scalar != 1.0f)
          for (unsigned int i = 0; i < num_deltas; i++)
          {
            unsigned int pt_index = apply_to_all ? i : indices[i];
            if (unlikely (pt_index >= deltas.length)) continue;
            unsigned int pt_index;
            if (apply_to_all)
              pt_index = i;
            else
            {
              pt_index = indices[i];
              if (unlikely (pt_index >= deltas.length)) continue;
            }
            auto &delta = deltas.arrayZ[pt_index];
            ref_points += !delta.flag;
            delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
            delta.x += x_deltas.arrayZ[i] * scalar;
            delta.y += y_deltas.arrayZ[i] * scalar;
@ -626,23 +386,34 @@ struct gvar
        else
          for (unsigned int i = 0; i < num_deltas; i++)
          {
            unsigned int pt_index = apply_to_all ? i : indices[i];
            if (unlikely (pt_index >= deltas.length)) continue;
            unsigned int pt_index;
            if (apply_to_all)
              pt_index = i;
            else
            {
              pt_index = indices[i];
              if (unlikely (pt_index >= deltas.length)) continue;
            }
            auto &delta = deltas.arrayZ[pt_index];
            ref_points += !delta.flag;
            delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
            delta.x += x_deltas.arrayZ[i];
            delta.y += y_deltas.arrayZ[i];
          }

        /* infer deltas for unreferenced points */
        if (ref_points && ref_points < orig_points.length)
        if (!apply_to_all)
        {
          unsigned start_point = 0;
          for (unsigned c = 0; c < end_points.length; c++)
          if (!end_points)
          {
            unsigned end_point = end_points.arrayZ[c];
            for (unsigned i = 0; i < points.length; ++i)
              if (points.arrayZ[i].is_end_point)
                end_points.push (i);
            if (unlikely (end_points.in_error ())) return false;
          }

          unsigned start_point = 0;
          for (unsigned end_point : end_points)
          {
            /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
            unsigned unref_count = 0;
            for (unsigned i = start_point; i < end_point + 1; i++)
@ -689,14 +460,14 @@ struct gvar
          }
        }

        /* apply specified / inferred deltas to points */
        for (unsigned int i = 0; i < points.length; i++)
        {
          points.arrayZ[i].x += deltas.arrayZ[i].x;
          points.arrayZ[i].y += deltas.arrayZ[i].y;
        }
        flush = true;

      } while (iterator.move_to_next ());

      if (flush)
        for (unsigned int i = 0; i < points.length; i++)
          points.arrayZ[i].translate (deltas.arrayZ[i]);

      return true;
    }

@ -704,6 +475,8 @@ struct gvar

    private:
    hb_blob_ptr_t<gvar> table;
    unsigned glyphCount;
    hb_vector_t<signed> shared_tuple_active_idx;
  };

  protected:
@ -719,7 +492,7 @@ struct gvar
  NNOffset32To<UnsizedArrayOf<F2DOT14>>
                sharedTuples;   /* Offset from the start of this table to the shared tuple records.
                                 * Array of tuple records shared across all glyph variation data tables. */
  HBUINT16      glyphCount;     /* The number of glyphs in this font. This must match the number of
  HBUINT16      glyphCountX;    /* The number of glyphs in this font. This must match the number of
                                 * glyphs stored elsewhere in the font. */
  HBUINT16      flags;          /* Bit-field that gives the format of the offset array that follows.
                                 * If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
@ -185,12 +185,8 @@ struct hvarvvar_subset_plan_t
  {
    retain_adv_map = plan->flags & HB_SUBSET_FLAGS_RETAIN_GIDS;
    outer_map.add (0);
    for (hb_codepoint_t gid = 0; gid < plan->num_output_glyphs (); gid++)
    {
      hb_codepoint_t old_gid;
      if (plan->old_gid_for_new_gid (gid, &old_gid))
        inner_sets[0]->add (old_gid);
    }
    for (hb_codepoint_t old_gid : plan->glyphset()->iter())
      inner_sets[0]->add (old_gid);
    hb_set_union (adv_set, inner_sets[0]);
  }

@ -202,10 +198,12 @@ struct hvarvvar_subset_plan_t
    if (retain_adv_map)
    {
      for (hb_codepoint_t gid = 0; gid < plan->num_output_glyphs (); gid++)
      {
        if (inner_sets[0]->has (gid))
          inner_maps[0].add (gid);
        else
          inner_maps[0].skip ();
      }
    }
    else
    {
@ -265,6 +263,9 @@ struct HVARVVAR
                  rsbMap.sanitize (c, this));
  }

  const VariationStore& get_var_store () const
  { return this+varStore; }

  void listup_index_maps (hb_vector_t<const DeltaSetIndexMap *> &index_maps) const
  {
    index_maps.push (&(this+advMap));
@ -4,7 +4,6 @@
 * Copyright © 2005  Werner Lemberg
 * Copyright © 2013-2015  Alexei Podtelezhnikov
 *
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
@ -85,7 +84,7 @@ void hb_outline_t::replay (hb_draw_funcs_t *pen, void *pen_data) const
  }
}

float hb_outline_t::area () const
float hb_outline_t::control_area () const
{
  float a = 0;
  unsigned first = 0;
@ -118,7 +117,7 @@ void hb_outline_t::embolden (float x_strength, float y_strength,
  x_strength /= 2.f;
  y_strength /= 2.f;

  bool orientation_negative = area () < 0;
  bool orientation_negative = control_area () < 0;

  signed first = 0;
  for (unsigned c = 0; c < contours.length; c++)

@ -68,7 +68,7 @@ struct hb_outline_t
  void reset () { points.shrink (0, false); contours.resize (0); }

  HB_INTERNAL void replay (hb_draw_funcs_t *pen, void *pen_data) const;
  HB_INTERNAL float area () const;
  HB_INTERNAL float control_area () const;
  HB_INTERNAL void embolden (float x_strength, float y_strength,
                             float x_shift, float y_shift);
@ -616,7 +616,7 @@ typedef enum {
  HB_PAINT_COMPOSITE_MODE_HSL_HUE,
  HB_PAINT_COMPOSITE_MODE_HSL_SATURATION,
  HB_PAINT_COMPOSITE_MODE_HSL_COLOR,
  HB_PAINT_COMPOSITE_MODE_HSL_LUMINOSITY,
  HB_PAINT_COMPOSITE_MODE_HSL_LUMINOSITY
} hb_paint_composite_mode_t;

/**
@ -203,8 +203,8 @@ struct hb_paint_funcs_t
    if (!a)
      return false;

    float cc = cosf (a * (float) M_PI);
    float ss = sinf (a * (float) M_PI);
    float cc = cosf (a * HB_PI);
    float ss = sinf (a * HB_PI);
    push_transform (paint_data, cc, ss, -ss, cc, 0.f, 0.f);
    return true;
  }
@ -216,8 +216,8 @@ struct hb_paint_funcs_t
    if (!sx && !sy)
      return false;

    float x = tanf (-sx * (float) M_PI);
    float y = tanf (+sy * (float) M_PI);
    float x = tanf (-sx * HB_PI);
    float y = tanf (+sy * HB_PI);
    push_transform (paint_data, 1.f, y, x, 1.f, 0.f, 0.f);
    return true;
  }
@ -29,7 +29,16 @@

#include "hb.hh"

/* Memory pool for persistent allocation of small objects. */
/* Memory pool for persistent allocation of small objects.
 *
 * Some AI musings on this, not necessarily true:
 *
 * This is a very simple implementation, but it's good enough for our
 * purposes. It's not thread-safe. It's not very fast. It's not
 * very memory efficient. It's not very cache efficient. It's not
 * very anything efficient. But it's simple and it works. And it's
 * good enough for our purposes. If you need something more
 * sophisticated, use a real allocator. Or use a real language. */

template <typename T, unsigned ChunkLen = 32>
struct hb_pool_t
@ -35,6 +35,12 @@
 *
 * Priority queue implemented as a binary heap. Supports extract minimum
 * and insert operations.
 *
 * The priority queue is implemented as a binary heap, which is a complete
 * binary tree. The root of the tree is the minimum element. The heap
 * property is that the priority of a node is less than or equal to the
 * priority of its children. The heap is stored in an array, with the
 * children of node i stored at indices 2i + 1 and 2i + 2.
 */
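// Editorial illustration of the layout described above: the heap
// {1, 3, 2, 7, 5} keeps its minimum at index 0; the children of index 0
// are indices 1 and 2 (values 3 and 2), and the children of index 1 are
// indices 3 and 4 (values 7 and 5). In general parent(i) = (i - 1) / 2,
// and the children of node i sit at 2i + 1 and 2i + 2.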
struct hb_priority_queue_t
{
@ -228,6 +228,18 @@ struct hb_sanitize_context_t :

  unsigned get_edit_count () { return edit_count; }


  bool check_ops (unsigned count)
  {
    /* Avoid underflow */
    if (unlikely (this->max_ops < 0 || count >= (unsigned) this->max_ops))
    {
      this->max_ops = -1;
      return false;
    }
    return (this->max_ops -= (int) count) > 0;
  }
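  // Editorial note: callers are expected to charge work against the op
  // budget before doing it (assumed call-site shape, not quoted from this
  // commit):
  //
  //   if (unlikely (!c->check_ops (array_len)))
  //     return_trace (false);   /* give up on absurdly large inputs */
  //
  // Once max_ops is exhausted the context keeps failing, which bounds the
  // total sanitization work spent on hostile fonts.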
  bool check_range (const void *base,
                    unsigned int len) const
  {
@ -196,4 +196,250 @@ hb_shape (hb_font_t *font,
}


#ifdef HB_EXPERIMENTAL_API

static float
buffer_advance (hb_buffer_t *buffer)
{
  float a = 0;
  auto *pos = buffer->pos;
  unsigned count = buffer->len;
  if (HB_DIRECTION_IS_HORIZONTAL (buffer->props.direction))
    for (unsigned i = 0; i < count; i++)
      a += pos[i].x_advance;
  else
    for (unsigned i = 0; i < count; i++)
      a += pos[i].y_advance;
  return a;
}

static void
reset_buffer (hb_buffer_t *buffer,
              hb_array_t<const hb_glyph_info_t> text)
{
  assert (buffer->ensure (text.length));
  buffer->have_positions = false;
  buffer->len = text.length;
  memcpy (buffer->info, text.arrayZ, text.length * sizeof (buffer->info[0]));
  hb_buffer_set_content_type (buffer, HB_BUFFER_CONTENT_TYPE_UNICODE);
}

/**
 * hb_shape_justify:
 * @font: a mutable #hb_font_t to use for shaping
 * @buffer: an #hb_buffer_t to shape
 * @features: (array length=num_features) (nullable): an array of user
 *    specified #hb_feature_t or `NULL`
 * @num_features: the length of @features array
 * @shaper_list: (array zero-terminated=1) (nullable): a `NULL`-terminated
 *    array of shapers to use or `NULL`
 * @min_target_advance: Minimum advance width/height to aim for.
 * @max_target_advance: Maximum advance width/height to aim for.
 * @advance: (inout): Input/output advance width/height of the buffer.
 * @var_tag: (out): Variation-axis tag used for justification.
 * @var_value: (out): Variation-axis value used to reach target justification.
 *
 * See hb_shape_full() for basic details. If @shaper_list is not `NULL`, the specified
 * shapers will be used in the given order, otherwise the default shapers list
 * will be used.
 *
 * In addition, justify the shaping results such that the shaping results reach
 * the target advance width/height, depending on the buffer direction.
 *
 * If the advance of the buffer shaped with hb_shape_full() is already known,
 * put that in *advance. Otherwise set *advance to zero.
 *
 * This API is currently experimental and will probably change in the future.
 *
 * Return value: false if all shapers failed, true otherwise
 *
 * XSince: EXPERIMENTAL
 **/
hb_bool_t
hb_shape_justify (hb_font_t          *font,
                  hb_buffer_t        *buffer,
                  const hb_feature_t *features,
                  unsigned int        num_features,
                  const char * const *shaper_list,
                  float               min_target_advance,
                  float               max_target_advance,
                  float              *advance, /* IN/OUT */
                  hb_tag_t           *var_tag, /* OUT */
                  float              *var_value /* OUT */)
{
  // TODO Negative font scales?

  /* If the default advance already matches the target, nothing to do. Shape and return. */
  if (min_target_advance <= *advance && *advance <= max_target_advance)
  {
    *var_tag = HB_TAG_NONE;
    *var_value = 0.0f;
    return hb_shape_full (font, buffer,
                          features, num_features,
                          shaper_list);
  }

  hb_face_t *face = font->face;

  /* Choose the variation tag to use for justification. */

  hb_tag_t tag = HB_TAG_NONE;
  hb_ot_var_axis_info_t axis_info;

  hb_tag_t tags[] =
  {
    HB_TAG ('j','s','t','f'),
    HB_TAG ('w','d','t','h'),
  };
  for (unsigned i = 0; i < ARRAY_LENGTH (tags); i++)
    if (hb_ot_var_find_axis_info (face, tags[i], &axis_info))
    {
      tag = *var_tag = tags[i];
      break;
    }

  /* If no suitable variation axis was found, we can't justify. Just shape and return. */
  if (!tag)
  {
    *var_tag = HB_TAG_NONE;
    *var_value = 0.0f;
    if (hb_shape_full (font, buffer,
                       features, num_features,
                       shaper_list))
    {
      *advance = buffer_advance (buffer);
      return true;
    }
    else
      return false;
  }

  /* Copy the buffer text, as we need it to shape multiple times. */
  unsigned text_len = buffer->len;
  auto *text_info = (hb_glyph_info_t *) hb_malloc (text_len * sizeof (buffer->info[0]));
  if (unlikely (text_len && !text_info))
    return false;
  hb_memcpy (text_info, buffer->info, text_len * sizeof (buffer->info[0]));
  auto text = hb_array<const hb_glyph_info_t> (text_info, text_len);

  /* If the default advance was not provided to us, calculate it. */
  if (!*advance)
  {
    hb_font_set_variation (font, tag, axis_info.default_value);
    if (!hb_shape_full (font, buffer,
                        features, num_features,
                        shaper_list))
      return false;
    *advance = buffer_advance (buffer);
  }

  /* If the default advance already matches the target, nothing to do. Shape and return.
   * Do this again, in case the advance was just calculated.
   */
  if (min_target_advance <= *advance && *advance <= max_target_advance)
  {
    *var_tag = HB_TAG_NONE;
    *var_value = 0.0f;
    return true;
  }

  /* Prepare for running the solver. */
  double a, b, ya, yb;
  if (*advance < min_target_advance)
  {
    /* Need to expand. */
    ya = (double) *advance;
    a = (double) axis_info.default_value;
    b = (double) axis_info.max_value;

    /* Shape the buffer for maximum expansion, to use as the other
     * starting point for the solver. */
    hb_font_set_variation (font, tag, (float) b);
    reset_buffer (buffer, text);
    if (!hb_shape_full (font, buffer,
                        features, num_features,
                        shaper_list))
      return false;
    yb = (double) buffer_advance (buffer);
    /* If the maximum expansion is less than the max target,
     * there's nothing to solve for. Just return it. */
    if (yb <= (double) max_target_advance)
    {
      *var_value = (float) b;
      *advance = (float) yb;
      return true;
    }
  }
  else
  {
    /* Need to shrink. */
    yb = (double) *advance;
    a = (double) axis_info.min_value;
    b = (double) axis_info.default_value;

    /* Shape the buffer for maximum shrinkage, to use as the other
     * starting point for the solver. */
    hb_font_set_variation (font, tag, (float) a);
    reset_buffer (buffer, text);
    if (!hb_shape_full (font, buffer,
                        features, num_features,
                        shaper_list))
      return false;
    ya = (double) buffer_advance (buffer);
    /* If the maximum shrinkage is more than the min target,
     * there's nothing to solve for. Just return it. */
    if (ya >= (double) min_target_advance)
    {
      *var_value = (float) a;
      *advance = (float) ya;
      return true;
    }
  }

  /* Run the solver to find a var axis value that hits
   * the desired width. */

  double epsilon = (b - a) / (1<<14);
  bool failed = false;

  auto f = [&] (double x)
  {
    hb_font_set_variation (font, tag, (float) x);
    reset_buffer (buffer, text);
    if (unlikely (!hb_shape_full (font, buffer,
                                  features, num_features,
                                  shaper_list)))
    {
      failed = true;
      return (double) min_target_advance;
    }

    double w = (double) buffer_advance (buffer);
    DEBUG_MSG (JUSTIFY, nullptr, "Trying '%c%c%c%c' axis parameter %f. Advance %g. Target: min %g max %g",
               HB_UNTAG (tag), x, w,
               (double) min_target_advance, (double) max_target_advance);
    return w;
  };

  double y = 0;
  double itp = solve_itp (f,
                          a, b,
                          epsilon,
                          (double) min_target_advance, (double) max_target_advance,
                          ya, yb, y);

  hb_free (text_info);

  if (failed)
    return false;

  *var_value = (float) itp;
  *advance = (float) y;

  return true;
}
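A minimal usage sketch for the experimental API above; this is editorial, not part of the commit. `font`, `buffer`, and `target` are assumed to exist, and the face is assumed to carry a `jstf` or `wdth` variation axis.

//   float advance = 0;                /* 0: let the function compute it */
//   hb_tag_t var_tag;
//   float var_value;
//   if (hb_shape_justify (font, buffer, nullptr, 0, nullptr,
//                         target - 1.f, target + 1.f,
//                         &advance, &var_tag, &var_value))
//   {
//     /* The font is left with var_tag=var_value applied, and `buffer`
//      * holds the justified glyphs whose total advance is `advance`. */
//   }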
|
||||
#endif


#endif
@ -53,6 +53,18 @@ hb_shape_full (hb_font_t *font,
               unsigned int num_features,
               const char * const *shaper_list);

HB_EXTERN hb_bool_t
hb_shape_justify (hb_font_t *font,
                  hb_buffer_t *buffer,
                  const hb_feature_t *features,
                  unsigned int num_features,
                  const char * const *shaper_list,
                  float min_target_advance,
                  float max_target_advance,
                  float *advance, /* IN/OUT */
                  hb_tag_t *var_tag, /* OUT */
                  float *var_value /* OUT */);

HB_EXTERN const char **
hb_shape_list_shapers (void);
@ -39,7 +39,7 @@ HB_SHAPER_IMPLEMENT (graphite2)
#endif

#ifndef HB_NO_OT_SHAPE
HB_SHAPER_IMPLEMENT (ot) /* <--- This is our main OpenType shaper. */
HB_SHAPER_IMPLEMENT (ot) /* <--- This is our main shaper. */
#endif

#ifdef HAVE_UNISCRIBE
@ -36,9 +36,11 @@
#include "OT/Color/COLR/COLR.hh"
#include "hb-ot-glyf-table.hh"
#include "hb-ot-head-table.hh"
#include "hb-ot-hmtx-table.hh"
#include "hb-ot-maxp-table.hh"

#ifndef HB_NO_VISIBILITY
#include "hb-ot-name-language-static.hh"

uint64_t const _hb_NullPool[(HB_NULL_POOL_SIZE + sizeof (uint64_t) - 1) / sizeof (uint64_t)] = {};
/*thread_local*/ uint64_t _hb_CrapPool[(HB_NULL_POOL_SIZE + sizeof (uint64_t) - 1) / sizeof (uint64_t)] = {};
@ -108,4 +110,26 @@ hb_face_t::load_upem () const
}


#ifndef HB_NO_VAR
bool
_glyf_get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical,
                                             int *lsb)
{
  return font->face->table.glyf->get_leading_bearing_with_var_unscaled (font, glyph, is_vertical, lsb);
}

unsigned
_glyf_get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical)
{
  return font->face->table.glyf->get_advance_with_var_unscaled (font, glyph, is_vertical);
}
#endif

bool
_glyf_get_leading_bearing_without_var_unscaled (hb_face_t *face, hb_codepoint_t gid, bool is_vertical, int *lsb)
{
  return face->table.glyf->get_leading_bearing_without_var_unscaled (gid, is_vertical, lsb);
}


#endif
@ -46,13 +46,13 @@
static inline float
_hb_angle_to_ratio (float a)
{
  return tanf (a * float (-M_PI / 180.));
  return tanf (a * -HB_PI / 180.f);
}

static inline float
_hb_ratio_to_angle (float r)
{
  return atanf (r) * float (-180. / M_PI);
  return atanf (r) * -180.f / HB_PI;
}

/**
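// Editorial check that the _hb_angle_to_ratio/_hb_ratio_to_angle pair in
// the hunk above are inverses: for a slant of a = -12 degrees,
// _hb_angle_to_ratio (-12.f) = tanf (12 * HB_PI / 180.f) ≈ 0.2126f, and
// _hb_ratio_to_angle (0.2126f) = atanf (0.2126f) * -180.f / HB_PI ≈ -12.f.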
@ -71,7 +71,6 @@ hb_subset_input_t::hb_subset_input_t ()
|
||||
hb_tag_t default_no_subset_tables[] = {
|
||||
HB_TAG ('a', 'v', 'a', 'r'),
|
||||
HB_TAG ('g', 'a', 's', 'p'),
|
||||
HB_TAG ('c', 'v', 't', ' '),
|
||||
HB_TAG ('f', 'p', 'g', 'm'),
|
||||
HB_TAG ('p', 'r', 'e', 'p'),
|
||||
HB_TAG ('V', 'D', 'M', 'X'),
|
||||
|
@ -36,8 +36,10 @@
|
||||
#include "hb-ot-layout-gpos-table.hh"
|
||||
#include "hb-ot-layout-gsub-table.hh"
|
||||
#include "hb-ot-cff1-table.hh"
|
||||
#include "hb-ot-cff2-table.hh"
|
||||
#include "OT/Color/COLR/COLR.hh"
|
||||
#include "OT/Color/COLR/colrv1-closure.hh"
|
||||
#include "OT/Color/CPAL/CPAL.hh"
|
||||
#include "hb-ot-var-fvar-table.hh"
|
||||
#include "hb-ot-var-avar-table.hh"
|
||||
#include "hb-ot-stat-table.hh"
|
||||
@ -293,7 +295,7 @@ _closure_glyphs_lookups_features (hb_subset_plan_t *plan,
|
||||
feature_record_cond_idx_map,
|
||||
feature_substitutes_map);
|
||||
|
||||
if (table_tag == HB_OT_TAG_GSUB)
|
||||
if (table_tag == HB_OT_TAG_GSUB && !(plan->flags & HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE))
|
||||
hb_ot_layout_lookups_substitute_closure (plan->source,
|
||||
&lookup_indices,
|
||||
gids_to_retain);
|
||||
@ -345,7 +347,10 @@ _get_hb_font_with_variations (const hb_subset_plan_t *plan)
|
||||
hb_font_t *font = hb_font_create (plan->source);
|
||||
|
||||
hb_vector_t<hb_variation_t> vars;
|
||||
vars.alloc (plan->user_axes_location.get_population ());
|
||||
if (!vars.alloc (plan->user_axes_location.get_population ())) {
|
||||
hb_font_destroy (font);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
for (auto _ : plan->user_axes_location)
|
||||
{
|
||||
@ -381,7 +386,13 @@ _collect_layout_variation_indices (hb_subset_plan_t* plan)
|
||||
bool collect_delta = plan->pinned_at_default ? false : true;
|
||||
if (collect_delta)
|
||||
{
|
||||
font = _get_hb_font_with_variations (plan);
|
||||
if (unlikely (!plan->check_success (font = _get_hb_font_with_variations (plan)))) {
|
||||
hb_font_destroy (font);
|
||||
gdef.destroy ();
|
||||
gpos.destroy ();
|
||||
return;
|
||||
}
|
||||
|
||||
if (gdef->has_var_store ())
|
||||
{
|
||||
var_store = &(gdef->get_var_store ());
|
||||
@ -555,9 +566,12 @@ _populate_unicodes_to_retain (const hb_set_t *unicodes,
|
||||
if (plan->codepoint_to_glyph->has (cp))
|
||||
continue;
|
||||
|
||||
hb_codepoint_t gid = (*unicode_glyphid_map)[cp];
|
||||
plan->codepoint_to_glyph->set (cp, gid);
|
||||
plan->unicode_to_new_gid_list.push (hb_pair (cp, gid));
|
||||
hb_codepoint_t *gid;
|
||||
if (!unicode_glyphid_map->has(cp, &gid))
|
||||
continue;
|
||||
|
||||
plan->codepoint_to_glyph->set (cp, *gid);
|
||||
plan->unicode_to_new_gid_list.push (hb_pair (cp, *gid));
|
||||
}
|
||||
plan->unicode_to_new_gid_list.qsort ();
|
||||
}

@ -609,7 +623,7 @@ _glyf_add_gid_and_children (const OT::glyf_accelerator_t &glyf,

  gids_to_retain->add (gid);

  for (auto item : glyf.glyph_for_gid (gid).get_composite_iterator ())
  for (auto &item : glyf.glyph_for_gid (gid).get_composite_iterator ())
    operation_count =
      _glyf_add_gid_and_children (glyf,
                                  item.get_gid (),

@ -617,9 +631,53 @@ _glyf_add_gid_and_children (const OT::glyf_accelerator_t &glyf,
                                  operation_count,
                                  depth);

#ifndef HB_NO_VAR_COMPOSITES
  for (auto &item : glyf.glyph_for_gid (gid).get_var_composite_iterator ())
  {
    operation_count =
      _glyf_add_gid_and_children (glyf,
                                  item.get_gid (),
                                  gids_to_retain,
                                  operation_count,
                                  depth);
  }
#endif

  return operation_count;
}
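
Both loops above, the classic composite one and the new var-composite one, recurse through component glyphs while threading an operation_count budget (plus a depth limit) through every call, so a malformed or cyclic composite graph cannot make the closure explode. A minimal sketch of that guard pattern, with hypothetical limits and a plain adjacency list standing in for the glyf table:

<pre>
#include &lt;cstdio&gt;
#include &lt;set&gt;
#include &lt;vector&gt;

static const int MAX_OPS = 10000;  // hypothetical operation budget
static const int MAX_DEPTH = 12;   // hypothetical nesting limit

// Adds gid and every glyph reachable through its components to `retain`,
// charging one operation per visit; returns the updated operation count.
static int
add_gid_and_children (const std::vector&lt;std::vector&lt;int&gt;&gt; &components,
                      int gid, std::set&lt;int&gt; &retain,
                      int operation_count = 0, int depth = 0)
{
  if (depth++ &gt; MAX_DEPTH) return operation_count;
  if (operation_count++ &gt; MAX_OPS) return operation_count;
  // Cycle guard: already-retained glyphs are not visited again.
  if (!retain.insert (gid).second) return operation_count;

  for (int child : components[gid])
    operation_count = add_gid_and_children (components, child, retain,
                                            operation_count, depth);
  return operation_count;
}

int main ()
{
  std::vector&lt;std::vector&lt;int&gt;&gt; components { {1, 2}, {2}, {} };
  std::set&lt;int&gt; retain;
  add_gid_and_children (components, 0, retain);
  printf ("retained %zu glyphs\n", retain.size ());
  return 0;
}
</pre>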

static void
_nameid_closure (hb_subset_plan_t* plan,
                 hb_set_t* drop_tables)
{
#ifndef HB_NO_STYLE
  plan->source->table.STAT->collect_name_ids (&plan->user_axes_location, &plan->name_ids);
#endif
#ifndef HB_NO_VAR
  if (!plan->all_axes_pinned)
    plan->source->table.fvar->collect_name_ids (&plan->user_axes_location, &plan->name_ids);
#endif
#ifndef HB_NO_COLOR
  if (!drop_tables->has (HB_OT_TAG_CPAL))
    plan->source->table.CPAL->collect_name_ids (&plan->colr_palettes, &plan->name_ids);
#endif

#ifndef HB_NO_SUBSET_LAYOUT
  if (!drop_tables->has (HB_OT_TAG_GPOS))
  {
    hb_blob_ptr_t<GPOS> gpos = plan->source_table<GPOS> ();
    gpos->collect_name_ids (&plan->gpos_features, &plan->name_ids);
    gpos.destroy ();
  }
  if (!drop_tables->has (HB_OT_TAG_GSUB))
  {
    hb_blob_ptr_t<GSUB> gsub = plan->source_table<GSUB> ();
    gsub->collect_name_ids (&plan->gsub_features, &plan->name_ids);
    gsub.destroy ();
  }
#endif
}

static void
_populate_gids_to_retain (hb_subset_plan_t* plan,
                          hb_set_t* drop_tables)

@ -673,6 +731,7 @@ _populate_gids_to_retain (hb_subset_plan_t* plan,

  plan->_glyphset_colred = cur_glyphset;

  _nameid_closure (plan, drop_tables);
  /* Populate a full set of glyphs to retain by adding all referenced
   * composite glyphs. */
  if (glyf.has_data ())

@ -756,21 +815,6 @@ _create_old_gid_to_new_gid_map (const hb_face_t *face,
    ;
}

static void
_nameid_closure (hb_face_t *face,
                 hb_set_t *nameids,
                 bool all_axes_pinned,
                 hb_hashmap_t<hb_tag_t, float> *user_axes_location)
{
#ifndef HB_NO_STYLE
  face->table.STAT->collect_name_ids (user_axes_location, nameids);
#endif
#ifndef HB_NO_VAR
  if (!all_axes_pinned)
    face->table.fvar->collect_name_ids (user_axes_location, nameids);
#endif
}

#ifndef HB_NO_VAR
static void
_normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)

@ -783,12 +827,15 @@ _normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)

  bool has_avar = face->table.avar->has_data ();
  const OT::SegmentMaps *seg_maps = nullptr;
  unsigned avar_axis_count = 0;
  if (has_avar)
  {
    seg_maps = face->table.avar->get_segment_maps ();
    avar_axis_count = face->table.avar->get_axis_count();
  }

  bool axis_not_pinned = false;
  unsigned old_axis_idx = 0, new_axis_idx = 0;
  unsigned int i = 0;
  for (const auto& axis : axes)
  {
    hb_tag_t axis_tag = axis.get_axis_tag ();

@ -803,7 +850,7 @@ _normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)
    else
    {
      int normalized_v = axis.normalize_axis_value (plan->user_axes_location.get (axis_tag));
      if (has_avar && old_axis_idx < face->table.avar->get_axis_count ())
      if (has_avar && old_axis_idx < avar_axis_count)
      {
        normalized_v = seg_maps->map (normalized_v);
      }

@ -811,17 +858,99 @@ _normalize_axes_location (hb_face_t *face, hb_subset_plan_t *plan)
      if (normalized_v != 0)
        plan->pinned_at_default = false;

      plan->normalized_coords[i] = normalized_v;
      plan->normalized_coords[old_axis_idx] = normalized_v;
    }
    if (has_avar)
      seg_maps = &StructAfter<OT::SegmentMaps> (*seg_maps);

    old_axis_idx++;

    i++;
    if (has_avar && old_axis_idx < avar_axis_count)
      seg_maps = &StructAfter<OT::SegmentMaps> (*seg_maps);
  }
  plan->all_axes_pinned = !axis_not_pinned;
}

static void
_update_instance_metrics_map_from_cff2 (hb_subset_plan_t *plan)
{
  if (!plan->normalized_coords) return;
  OT::cff2::accelerator_t cff2 (plan->source);
  if (!cff2.is_valid ()) return;

  hb_font_t *font = nullptr;
  if (unlikely (!plan->check_success (font = _get_hb_font_with_variations (plan))))
  {
    hb_font_destroy (font);
    return;
  }

  hb_glyph_extents_t extents = {0x7FFF, -0x7FFF};
  OT::hmtx_accelerator_t _hmtx (plan->source);
  float *hvar_store_cache = nullptr;
  if (_hmtx.has_data () && _hmtx.var_table.get_length ())
    hvar_store_cache = _hmtx.var_table->get_var_store ().create_cache ();

  OT::vmtx_accelerator_t _vmtx (plan->source);
  float *vvar_store_cache = nullptr;
  if (_vmtx.has_data () && _vmtx.var_table.get_length ())
    vvar_store_cache = _vmtx.var_table->get_var_store ().create_cache ();

  for (auto p : *plan->glyph_map)
  {
    hb_codepoint_t old_gid = p.first;
    hb_codepoint_t new_gid = p.second;
    if (!cff2.get_extents (font, old_gid, &extents)) continue;
    bool has_bounds_info = true;
    if (extents.x_bearing == 0 && extents.width == 0 &&
        extents.height == 0 && extents.y_bearing == 0)
      has_bounds_info = false;

    if (has_bounds_info)
    {
      plan->head_maxp_info.xMin = hb_min (plan->head_maxp_info.xMin, extents.x_bearing);
      plan->head_maxp_info.xMax = hb_max (plan->head_maxp_info.xMax, extents.x_bearing + extents.width);
      plan->head_maxp_info.yMax = hb_max (plan->head_maxp_info.yMax, extents.y_bearing);
      plan->head_maxp_info.yMin = hb_min (plan->head_maxp_info.yMin, extents.y_bearing + extents.height);
    }

    if (_hmtx.has_data ())
    {
      int hori_aw = _hmtx.get_advance_without_var_unscaled (old_gid);
      if (_hmtx.var_table.get_length ())
        hori_aw += (int) roundf (_hmtx.var_table->get_advance_delta_unscaled (old_gid, font->coords, font->num_coords,
                                                                              hvar_store_cache));
      int lsb = extents.x_bearing;
      if (!has_bounds_info)
      {
        if (!_hmtx.get_leading_bearing_without_var_unscaled (old_gid, &lsb))
          continue;
      }
      plan->hmtx_map.set (new_gid, hb_pair ((unsigned) hori_aw, lsb));
      plan->bounds_width_map.set (new_gid, extents.width);
    }

    if (_vmtx.has_data ())
    {
      int vert_aw = _vmtx.get_advance_without_var_unscaled (old_gid);
      if (_vmtx.var_table.get_length ())
        vert_aw += (int) roundf (_vmtx.var_table->get_advance_delta_unscaled (old_gid, font->coords, font->num_coords,
                                                                              vvar_store_cache));

      int tsb = extents.y_bearing;
      if (!has_bounds_info)
      {
        if (!_vmtx.get_leading_bearing_without_var_unscaled (old_gid, &tsb))
          continue;
      }
      plan->vmtx_map.set (new_gid, hb_pair ((unsigned) vert_aw, tsb));
      plan->bounds_height_map.set (new_gid, extents.height);
    }
  }
  hb_font_destroy (font);
  if (hvar_store_cache)
    _hmtx.var_table->get_var_store ().destroy_cache (hvar_store_cache);
  if (vvar_store_cache)
    _vmtx.var_table->get_var_store ().destroy_cache (vvar_store_cache);
}
#endif

hb_subset_plan_t::hb_subset_plan_t (hb_face_t *face,

@ -884,6 +1013,8 @@ hb_subset_plan_t::hb_subset_plan_t (hb_face_t *face,
  _populate_unicodes_to_retain (input->sets.unicodes, input->sets.glyphs, this);

  _populate_gids_to_retain (this, input->sets.drop_tables);
  if (unlikely (in_error ()))
    return;

  _create_old_gid_to_new_gid_map (face,
                                  input->flags & HB_SUBSET_FLAGS_RETAIN_GIDS,

@ -905,10 +1036,13 @@ hb_subset_plan_t::hb_subset_plan_t (hb_face_t *face,
        glyph_map->get(unicode_to_new_gid_list.arrayZ[i].second);
  }

  _nameid_closure (face, &name_ids, all_axes_pinned, &user_axes_location);
  if (unlikely (in_error ()))
    return;

#ifndef HB_NO_VAR
  _update_instance_metrics_map_from_cff2 (this);
#endif

  if (attach_accelerator_data)
  {
    hb_multimap_t gid_to_unicodes;

@ -211,7 +211,7 @@ struct hb_subset_plan_t
  template<typename T>
  hb_blob_ptr_t<T> source_table()
  {
    hb_lock_t (accelerator ? &accelerator->sanitized_table_cache_lock : nullptr);
    hb_lock_t lock (accelerator ? &accelerator->sanitized_table_cache_lock : nullptr);

    auto *cache = accelerator ? &accelerator->sanitized_table_cache : &sanitized_table_cache;
    if (cache
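
The source_table() hunk above fixes a classic C++ RAII slip: `hb_lock_t (expr);` constructs an unnamed temporary whose destructor runs at the end of that statement, so the mutex is released before the cache is ever touched, whereas the named `hb_lock_t lock (expr);` holds it until the end of the scope. A minimal sketch of the hazard, using a hypothetical std::mutex-based guard rather than HarfBuzz's actual hb_lock_t:

<pre>
#include &lt;mutex&gt;

static std::mutex cache_mutex;
static int cached_value;

struct scoped_lock_t
{
  scoped_lock_t (std::mutex *m_) : m (m_) { if (m) m-&gt;lock (); }
  ~scoped_lock_t () { if (m) m-&gt;unlock (); }
  std::mutex *m;
};

int get_cached ()
{
  // Bug: an unnamed temporary is destroyed at the semicolon, so the mutex
  // is already released by the time cached_value is read:
  //   scoped_lock_t (&cache_mutex);

  scoped_lock_t lock (&cache_mutex);  // held until the end of the function
  return cached_value;
}
</pre>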

@ -43,17 +43,14 @@
#include "OT/Color/sbix/sbix.hh"
#include "hb-ot-os2-table.hh"
#include "hb-ot-post-table.hh"

#if !defined(AIX)
#include "hb-ot-post-table-v2subset.hh"
#endif

#include "hb-ot-cff1-table.hh"
#include "hb-ot-cff2-table.hh"
#include "hb-ot-vorg-table.hh"
#include "hb-ot-name-table.hh"
#include "hb-ot-layout-gsub-table.hh"
#include "hb-ot-layout-gpos-table.hh"
#include "hb-ot-var-cvar-table.hh"
#include "hb-ot-var-fvar-table.hh"
#include "hb-ot-var-gvar-table.hh"
#include "hb-ot-var-hvar-table.hh"

@ -482,6 +479,16 @@ _subset_table (hb_subset_plan_t *plan,
    if (plan->all_axes_pinned) return _subset<const OT::STAT> (plan, buf);
    else return _passthrough (plan, tag);

  case HB_TAG ('c', 'v', 't', ' '):
#ifndef HB_NO_VAR
    if (_is_table_present (plan->source, HB_OT_TAG_cvar) &&
        plan->normalized_coords && !plan->pinned_at_default)
    {
      auto &cvar = *plan->source->table.cvar;
      return OT::cvar::add_cvt_and_apply_deltas (plan, cvar.get_tuple_var_data (), &cvar);
    }
#endif
    return _passthrough (plan, tag);
  default:
    if (plan->flags & HB_SUBSET_FLAGS_PASSTHROUGH_UNRECOGNIZED)
      return _passthrough (plan, tag);

@ -630,8 +637,3 @@ hb_subset_plan_execute_or_fail (hb_subset_plan_t *plan)
end:
  return success ? hb_face_reference (plan->dest) : nullptr;
}

#ifndef HB_NO_VISIBILITY
/* If NO_VISIBILITY, libharfbuzz has this. */
#include "hb-ot-name-language-static.hh"
#endif

@ -71,6 +71,8 @@ typedef struct hb_subset_plan_t hb_subset_plan_t;
 * in the final subset.
 * @HB_SUBSET_FLAGS_NO_PRUNE_UNICODE_RANGES: If set then the unicode ranges in
 * OS/2 will not be recalculated.
 * @HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE: If set don't perform glyph closure on layout
 * substitution rules (GSUB). Since: 7.2.0.
 *
 * List of boolean properties that can be configured on the subset input.
 *

@ -87,6 +89,7 @@ typedef enum { /*< flags >*/
  HB_SUBSET_FLAGS_NOTDEF_OUTLINE = 0x00000040u,
  HB_SUBSET_FLAGS_GLYPH_NAMES = 0x00000080u,
  HB_SUBSET_FLAGS_NO_PRUNE_UNICODE_RANGES = 0x00000100u,
  HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE = 0x00000200u,
} hb_subset_flags_t;
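
The new HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE bit (0x00000200u) is what the `plan->flags` check added in _closure_glyphs_lookups_features above consults. A short sketch of how a caller of the public subsetting API would set it, assuming a face loaded elsewhere and omitting error checks:

<pre>
#include &lt;hb-subset.h&gt;

/* Subset `face` to the given codepoints without expanding the glyph set
 * through GSUB substitution closure (illustrative sketch only). */
hb_face_t *
subset_without_layout_closure (hb_face_t *face, const hb_codepoint_t *cps, unsigned count)
{
  hb_subset_input_t *input = hb_subset_input_create_or_fail ();
  hb_set_t *unicodes = hb_subset_input_unicode_set (input);
  for (unsigned i = 0; i &lt; count; i++)
    hb_set_add (unicodes, cps[i]);

  hb_subset_input_set_flags (input, HB_SUBSET_FLAGS_NO_LAYOUT_CLOSURE);

  hb_face_t *subset = hb_subset_or_fail (face, input);
  hb_subset_input_destroy (input);
  return subset;
}
</pre>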

/**

@ -164,7 +164,7 @@ typedef enum
 * @HB_UNICODE_COMBINING_CLASS_CCC122: [Lao]
 * @HB_UNICODE_COMBINING_CLASS_CCC129: [Tibetan]
 * @HB_UNICODE_COMBINING_CLASS_CCC130: [Tibetan]
 * @HB_UNICODE_COMBINING_CLASS_CCC133: [Tibetan]
 * @HB_UNICODE_COMBINING_CLASS_CCC132: [Tibetan] Since: 7.2.0
 * @HB_UNICODE_COMBINING_CLASS_ATTACHED_BELOW_LEFT: Marks attached at the bottom left
 * @HB_UNICODE_COMBINING_CLASS_ATTACHED_BELOW: Marks attached directly below
 * @HB_UNICODE_COMBINING_CLASS_ATTACHED_ABOVE: Marks attached directly above

@ -246,7 +246,7 @@ typedef enum
  /* Tibetan */
  HB_UNICODE_COMBINING_CLASS_CCC129 = 129,
  HB_UNICODE_COMBINING_CLASS_CCC130 = 130,
  HB_UNICODE_COMBINING_CLASS_CCC133 = 132,
  HB_UNICODE_COMBINING_CLASS_CCC132 = 132,


  HB_UNICODE_COMBINING_CLASS_ATTACHED_BELOW_LEFT = 200,

@ -47,20 +47,20 @@ HB_BEGIN_DECLS
 *
 * The minor component of the library version available at compile-time.
 */
#define HB_VERSION_MINOR 0
#define HB_VERSION_MINOR 2
/**
 * HB_VERSION_MICRO:
 *
 * The micro component of the library version available at compile-time.
 */
#define HB_VERSION_MICRO 1
#define HB_VERSION_MICRO 0

/**
 * HB_VERSION_STRING:
 *
 * A string literal containing the library version available at compile-time.
 */
#define HB_VERSION_STRING "7.0.1"
#define HB_VERSION_STRING "7.2.0"
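
With the bump to 7.2.0, downstream code can gate on the new release both at compile time, via the HB_VERSION_ATLEAST macro documented just below, and at run time, via hb_version_string(). A brief sketch:

<pre>
#include &lt;hb.h&gt;
#include &lt;stdio.h&gt;

int main (void)
{
#if HB_VERSION_ATLEAST (7, 2, 0)
  printf ("compiled against HarfBuzz %s (&gt;= 7.2.0)\n", HB_VERSION_STRING);
#else
  printf ("compiled against older HarfBuzz: %s\n", HB_VERSION_STRING);
#endif
  /* The library actually linked may differ from the compile-time headers: */
  printf ("linked against HarfBuzz %s\n", hb_version_string ());
  return 0;
}
</pre>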

/**
 * HB_VERSION_ATLEAST:

@ -246,7 +246,15 @@ extern "C" void hb_free_impl(void *ptr);
 * Compiler attributes
 */

#if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE__)
// gcc 10 has __has_builtin but not earlier versions. Sanction any gcc >= 5
// clang defines it so no need.
#ifdef __has_builtin
#define hb_has_builtin __has_builtin
#else
#define hb_has_builtin(x) ((defined(__GNUC__) && __GNUC__ >= 5))
#endif

#if defined(__OPTIMIZE__) && hb_has_builtin(__builtin_expect)
#define likely(expr) (__builtin_expect (!!(expr), 1))
#define unlikely(expr) (__builtin_expect (!!(expr), 0))
#else

@ -501,6 +509,12 @@ static_assert ((sizeof (hb_mask_t) == 4), "");
static_assert ((sizeof (hb_var_int_t) == 4), "");


/* Pie time. */
// https://github.com/harfbuzz/harfbuzz/issues/4166
#define HB_PI 3.14159265358979f
#define HB_2_PI (2.f * HB_PI)


/* Headers we include for everyone. Keep topologically sorted by dependency.
 * They express dependency amongst themselves, but no other file should include
 * them directly.*/