8313643: Update HarfBuzz to 8.2.2

Reviewed-by: jdv, prr, dnguyen
Author: Harshitha Onkar, 2023-10-31 19:01:15 +00:00
parent 613a3cc689
commit 2182c93689
179 changed files with 11431 additions and 5620 deletions


@ -495,6 +495,11 @@ else
# hb-ft.cc is not presently needed, and requires freetype 2.4.2 or later.
LIBFONTMANAGER_EXCLUDE_FILES += libharfbuzz/hb-ft.cc
# list of disabled warnings and the compilers for which it was specifically added.
# array-bounds -> GCC 12 on Alpine Linux
# parentheses -> GCC 6
# range-loop-analysis -> clang on Xcode12
HARFBUZZ_DISABLED_WARNINGS_gcc := missing-field-initializers strict-aliasing \
unused-result array-bounds parentheses
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.


@ -1,9 +1,7 @@
## Harfbuzz v7.2.0
## Harfbuzz v8.2.2
### Harfbuzz License
https://github.com/harfbuzz/harfbuzz/blob/7.2.0/COPYING
<pre>
HarfBuzz is licensed under the so-called "Old MIT" license. Details follow.
@ -14,6 +12,7 @@ Copyright © 2010-2023 Google, Inc.
Copyright © 2018-2020 Ebrahim Byagowi
Copyright © 2004-2013 Red Hat, Inc.
Copyright © 2019 Facebook, Inc.
Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
Copyright © 2007 Chris Wilson
Copyright © 2018-2019 Adobe Inc.
Copyright © 2006-2023 Behdad Esfahbod
@ -72,6 +71,15 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
---------------------------------
The below license applies to the following files:
libharfbuzz/hb-unicode-emoji-table.hh
© 2023 Unicode®, Inc.
Unicode and the Unicode Logo are registered trademarks of Unicode, Inc.
in the U.S. and other countries.
For terms of use, see https://www.unicode.org/terms_of_use.html
</pre>
### AUTHORS File Information


@ -397,7 +397,6 @@ struct IndexSubtableRecord
TRACE_SERIALIZE (this);
auto *subtable = c->serializer->start_embed<IndexSubtable> ();
if (unlikely (!subtable)) return_trace (false);
if (unlikely (!c->serializer->extend_min (subtable))) return_trace (false);
auto *old_subtable = get_subtable (base);
@ -545,7 +544,8 @@ struct IndexSubtableArray
const IndexSubtableRecord*>> *lookup /* OUT */) const
{
bool start_glyph_is_set = false;
for (hb_codepoint_t new_gid = 0; new_gid < c->plan->num_output_glyphs (); new_gid++)
unsigned num_glyphs = c->plan->num_output_glyphs ();
for (hb_codepoint_t new_gid = 0; new_gid < num_glyphs; new_gid++)
{
hb_codepoint_t old_gid;
if (unlikely (!c->plan->old_gid_for_new_gid (new_gid, &old_gid))) continue;
@ -576,9 +576,6 @@ struct IndexSubtableArray
{
TRACE_SUBSET (this);
auto *dst = c->serializer->start_embed<IndexSubtableArray> ();
if (unlikely (!dst)) return_trace (false);
hb_vector_t<hb_pair_t<hb_codepoint_t, const IndexSubtableRecord*>> lookup;
build_lookup (c, bitmap_size_context, &lookup);
if (unlikely (!c->serializer->propagate_error (lookup)))
@ -993,12 +990,10 @@ CBLC::subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *cblc_prime = c->serializer->start_embed<CBLC> ();
// Use a vector as a secondary buffer as the tables need to be built in parallel.
hb_vector_t<char> cbdt_prime;
if (unlikely (!cblc_prime)) return_trace (false);
auto *cblc_prime = c->serializer->start_embed<CBLC> ();
if (unlikely (!c->serializer->extend_min (cblc_prime))) return_trace (false);
cblc_prime->version = version;
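The comment in this hunk carries the design: CBLC (the bitmap index) and CBDT (the bitmap data) reference each other, so the subsetter grows CBDT' in a plain byte vector while the serializer is still laying out CBLC'. A minimal sketch of that two-buffer pattern, using standard containers and made-up stand-ins rather than the HarfBuzz serializer API:

#include <cstdint>
#include <string>
#include <vector>

// Sketch only (not the HarfBuzz API): build the data table into a side
// buffer while the index table grows, so each index entry can record the
// offset its bitmap will have in the finished data table.
int main ()
{
  std::vector<uint32_t> index;   // stands in for CBLC'
  std::vector<char>     data;    // stands in for CBDT'
  const std::vector<std::string> bitmaps = { "png0", "png1", "png2" };

  for (const auto &bm : bitmaps)
  {
    index.push_back ((uint32_t) data.size ());       // offset of this glyph's data
    data.insert (data.end (), bm.begin (), bm.end ()); // grows in parallel
  }
  return 0;
}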


@ -53,6 +53,7 @@ struct Paint;
struct hb_paint_context_t :
hb_dispatch_context_t<hb_paint_context_t>
{
const char *get_name () { return "PAINT"; }
template <typename T>
return_t dispatch (const T &obj) { obj.paint_glyph (this); return hb_empty_t (); }
static return_t default_return_value () { return hb_empty_t (); }
@ -68,6 +69,8 @@ public:
unsigned int palette_index;
hb_color_t foreground;
VarStoreInstancer &instancer;
hb_map_t current_glyphs;
hb_map_t current_layers;
int depth_left = HB_MAX_NESTING_LEVEL;
int edge_count = HB_COLRV1_MAX_EDGE_COUNT;
@ -261,6 +264,7 @@ struct Variable
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
value.paint_glyph (c, varIdxBase);
}
@ -281,7 +285,7 @@ struct Variable
public:
VarIdx varIdxBase;
public:
DEFINE_SIZE_STATIC (4 + T::static_size);
DEFINE_SIZE_MIN (VarIdx::static_size + T::min_size);
};
template <typename T>
@ -315,6 +319,7 @@ struct NoVariable
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
value.paint_glyph (c, varIdxBase);
}
@ -332,7 +337,7 @@ struct NoVariable
T value;
public:
DEFINE_SIZE_STATIC (T::static_size);
DEFINE_SIZE_MIN (T::min_size);
};
// Color structures
@ -409,7 +414,6 @@ struct ColorLine
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (this);
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
if (!c->serializer->check_assign (out->extend, extend, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
@ -559,6 +563,7 @@ struct Affine2x3
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
c->funcs->push_transform (c->data,
xx.to_float (c->instancer (varIdxBase, 0)),
yx.to_float (c->instancer (varIdxBase, 1)),
@ -640,6 +645,7 @@ struct PaintSolid
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_bool_t is_foreground;
hb_color_t color;
@ -694,6 +700,7 @@ struct PaintLinearGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@ -760,6 +767,7 @@ struct PaintRadialGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@ -824,6 +832,7 @@ struct PaintSweepGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@ -875,6 +884,7 @@ struct PaintGlyph
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
c->funcs->push_inverse_root_transform (c->data, c->font);
c->funcs->push_clip_glyph (c->data, gid, c->font);
c->funcs->push_root_transform (c->data, c->font);
@ -947,6 +957,7 @@ struct PaintTransform
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
(this+transform).paint_glyph (c);
c->recurse (this+src);
c->funcs->pop_transform (c->data);
@ -991,6 +1002,7 @@ struct PaintTranslate
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float ddx = dx + c->instancer (varIdxBase, 0);
float ddy = dy + c->instancer (varIdxBase, 1);
@ -1039,6 +1051,7 @@ struct PaintScale
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = scaleX.to_float (c->instancer (varIdxBase, 0));
float sy = scaleY.to_float (c->instancer (varIdxBase, 1));
@ -1089,6 +1102,7 @@ struct PaintScaleAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = scaleX.to_float (c->instancer (varIdxBase, 0));
float sy = scaleY.to_float (c->instancer (varIdxBase, 1));
float tCenterX = centerX + c->instancer (varIdxBase, 2);
@ -1142,6 +1156,7 @@ struct PaintScaleUniform
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float s = scale.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_scale (c->data, s, s);
@ -1189,6 +1204,7 @@ struct PaintScaleUniformAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float s = scale.to_float (c->instancer (varIdxBase, 0));
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
@ -1240,6 +1256,7 @@ struct PaintRotate
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float a = angle.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_rotate (c->data, a);
@ -1287,6 +1304,7 @@ struct PaintRotateAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float a = angle.to_float (c->instancer (varIdxBase, 0));
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
@ -1341,6 +1359,7 @@ struct PaintSkew
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = xSkewAngle.to_float(c->instancer (varIdxBase, 0));
float sy = ySkewAngle.to_float(c->instancer (varIdxBase, 1));
@ -1391,6 +1410,7 @@ struct PaintSkewAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = xSkewAngle.to_float(c->instancer (varIdxBase, 0));
float sy = ySkewAngle.to_float(c->instancer (varIdxBase, 1));
float tCenterX = centerX + c->instancer (varIdxBase, 2);
@ -1426,20 +1446,24 @@ struct PaintComposite
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
if (!out->src.serialize_subset (c, src, this, instancer)) return_trace (false);
return_trace (out->backdrop.serialize_subset (c, backdrop, this, instancer));
bool ret = false;
ret |= out->src.serialize_subset (c, src, this, instancer);
ret |= out->backdrop.serialize_subset (c, backdrop, this, instancer);
return_trace (ret);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
c->check_ops (this->min_size) && // PaintComposite can get exponential
src.sanitize (c, this) &&
backdrop.sanitize (c, this));
}
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
c->recurse (this+backdrop);
c->funcs->push_group (c->data);
c->recurse (this+src);
@ -1514,10 +1538,10 @@ struct ClipBoxFormat2 : Variable<ClipBoxFormat1>
value.get_clip_box(clip_box, instancer);
if (instancer)
{
clip_box.xMin += _hb_roundf (instancer (varIdxBase, 0));
clip_box.yMin += _hb_roundf (instancer (varIdxBase, 1));
clip_box.xMax += _hb_roundf (instancer (varIdxBase, 2));
clip_box.yMax += _hb_roundf (instancer (varIdxBase, 3));
clip_box.xMin += roundf (instancer (varIdxBase, 0));
clip_box.yMin += roundf (instancer (varIdxBase, 1));
clip_box.xMax += roundf (instancer (varIdxBase, 2));
clip_box.yMax += roundf (instancer (varIdxBase, 3));
}
}
};
@ -1898,15 +1922,16 @@ struct LayerList : Array32OfOffset32To<Paint>
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto& _ : + hb_enumerate (*this)
| hb_filter (c->plan->colrv1_layers, hb_first))
{
auto *o = out->serialize_append (c->serializer);
if (unlikely (!o) || !o->serialize_subset (c, _.second, this, instancer))
return_trace (false);
if (unlikely (!o)) return_trace (false);
ret |= o->serialize_subset (c, _.second, this, instancer);
}
return_trace (true);
return_trace (ret);
}
bool sanitize (hb_sanitize_context_t *c) const
@ -2167,7 +2192,7 @@ struct COLR
if (version == 0 && (!base_it || !layer_it))
return_trace (false);
COLR *colr_prime = c->serializer->start_embed<COLR> ();
auto *colr_prime = c->serializer->start_embed<COLR> ();
if (unlikely (!c->serializer->extend_min (colr_prime))) return_trace (false);
if (version == 0)
@ -2284,6 +2309,7 @@ struct COLR
&(this+varIdxMap),
hb_array (font->coords, font->num_coords));
hb_paint_context_t c (this, funcs, data, font, palette_index, foreground, instancer);
c.current_glyphs.add (glyph);
if (version == 1)
{
@ -2399,18 +2425,42 @@ hb_paint_context_t::recurse (const Paint &paint)
void PaintColrLayers::paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
const LayerList &paint_offset_lists = c->get_colr_table ()->get_layerList ();
for (unsigned i = firstLayerIndex; i < firstLayerIndex + numLayers; i++)
{
if (unlikely (c->current_layers.has (i)))
continue;
c->current_layers.add (i);
const Paint &paint = paint_offset_lists.get_paint (i);
c->funcs->push_group (c->data);
c->recurse (paint);
c->funcs->pop_group (c->data, HB_PAINT_COMPOSITE_MODE_SRC_OVER);
c->current_layers.del (i);
}
}
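The current_layers additions above are a cycle guard: a malformed or malicious COLRv1 font can make a PaintColrLayers record reach itself again through the layer list, and without the guard the recursion would never terminate. A minimal sketch of the pattern with illustrative types (not the COLR structures):

#include <unordered_set>

// Mark a node before recursing, unmark afterwards, and skip anything
// that is already on the current path.
struct Node { const Node *a = nullptr, *b = nullptr; };

static void visit (const Node *n, std::unordered_set<const Node *> &active)
{
  if (!n || active.count (n)) return;   // null or cycle: stop
  active.insert (n);
  visit (n->a, active);
  visit (n->b, active);
  active.erase (n);                     // siblings may legitimately reuse this node
}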
void PaintColrGlyph::paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
if (unlikely (c->current_glyphs.has (gid)))
return;
c->current_glyphs.add (gid);
c->funcs->push_inverse_root_transform (c->data, c->font);
if (c->funcs->color_glyph (c->data, gid, c->font))
{
c->funcs->pop_transform (c->data);
c->current_glyphs.del (gid);
return;
}
c->funcs->pop_transform (c->data);
const COLR *colr_table = c->get_colr_table ();
const Paint *paint = colr_table->get_base_glyph_paint (gid);
@ -2429,6 +2479,8 @@ void PaintColrGlyph::paint_glyph (hb_paint_context_t *c) const
if (has_clip_box)
c->funcs->pop_clip (c->data);
c->current_glyphs.del (gid);
}
} /* namespace OT */


@ -48,7 +48,6 @@ struct SBIXGlyph
{
TRACE_SERIALIZE (this);
SBIXGlyph* new_glyph = c->start_embed<SBIXGlyph> ();
if (unlikely (!new_glyph)) return_trace (nullptr);
if (unlikely (!c->extend_min (new_glyph))) return_trace (nullptr);
new_glyph->xOffset = xOffset;
@ -143,7 +142,6 @@ struct SBIXStrike
unsigned int num_output_glyphs = c->plan->num_output_glyphs ();
auto* out = c->serializer->start_embed<SBIXStrike> ();
if (unlikely (!out)) return_trace (false);
auto snap = c->serializer->snapshot ();
if (unlikely (!c->serializer->extend (out, num_output_glyphs + 1))) return_trace (false);
out->ppem = ppem;
@ -388,7 +386,6 @@ struct sbix
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed<Array32OfOffset32To<SBIXStrike>> ();
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
hb_vector_t<Offset32To<SBIXStrike>*> new_strikes;
@ -423,8 +420,6 @@ struct sbix
{
TRACE_SUBSET (this);
sbix *sbix_prime = c->serializer->start_embed<sbix> ();
if (unlikely (!sbix_prime)) return_trace (false);
if (unlikely (!c->serializer->embed (this->version))) return_trace (false);
if (unlikely (!c->serializer->embed (this->flags))) return_trace (false);


@ -57,6 +57,9 @@ struct Coverage
public:
DEFINE_SIZE_UNION (2, format);
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
@ -113,22 +116,33 @@ struct Coverage
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned count = 0;
unsigned count = hb_len (glyphs);
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
hb_codepoint_t max = 0;
bool unsorted = false;
for (auto g: glyphs)
{
if (last != (hb_codepoint_t) -2 && g < last)
unsorted = true;
if (last + 1 != g)
num_ranges++;
last = g;
count++;
if (g > max) max = g;
}
u.format = count <= num_ranges * 3 ? 1 : 2;
u.format = !unsorted && count <= num_ranges * 3 ? 1 : 2;
#ifndef HB_NO_BEYOND_64K
if (count && last > 0xFFFFu)
if (max > 0xFFFFu)
u.format += 2;
if (unlikely (max > 0xFFFFFFu))
#else
if (unlikely (max > 0xFFFFu))
#endif
{
c->check_success (false, HB_SERIALIZE_ERROR_INT_OVERFLOW);
return_trace (false);
}
switch (u.format)
{
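The format choice above weighs a flat glyph array (format 1, one 16-bit glyph per entry) against range records (format 2, three 16-bit fields per range), hence the count <= num_ranges * 3 threshold; the new unsorted flag forces format 2 because only the range serializer re-sorts its input. A worked example under those assumptions:

// glyphs = {1, 2, 3, 10}      -> count = 4
// runs   = [1..3], [10..10]   -> num_ranges = 2
// 4 <= 2 * 3, so format 1 (glyph array, 4 * 2 bytes) wins over
// format 2 (range records, 2 * 6 bytes).
// Any glyph above 0xFFFF bumps the format by 2 (the 24-bit formats 3/4),
// unless HB_NO_BEYOND_64K is defined, in which case it is an overflow error.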
@ -148,8 +162,8 @@ struct Coverage
auto it =
+ iter ()
| hb_take (c->plan->source->get_num_glyphs ())
| hb_filter (c->plan->glyph_map_gsub)
| hb_map_retains_sorting (c->plan->glyph_map_gsub)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
// Cache the iterator result as it will be iterated multiple times


@ -79,7 +79,7 @@ struct CoverageFormat1_3
{
if (glyphArray.len > glyphs->get_population () * hb_bit_storage ((unsigned) glyphArray.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
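Both rewritten intersects loops in this change rely on the same cost model: scanning the coverage array costs one set-membership test per entry, while iterating the glyph set costs roughly one binary search of about log2(len) probes (which hb_bit_storage approximates) per set element. Rough numbers under that assumption:

// len = 1000 coverage entries, set population = 40, log2(1000) ~ 10:
//   iterate the set:      40 * 10  =  400 binary-search probes
//   iterate the coverage: 1000 set-membership tests
// 1000 > 40 * 10 / 2, so the branch above iterates the set.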


@ -95,19 +95,26 @@ struct CoverageFormat2_4
unsigned count = 0;
unsigned range = (unsigned) -1;
last = (hb_codepoint_t) -2;
unsigned unsorted = false;
for (auto g: glyphs)
{
if (last + 1 != g)
{
if (unlikely (last != (hb_codepoint_t) -2 && last + 1 > g))
unsorted = true;
range++;
rangeRecord[range].first = g;
rangeRecord[range].value = count;
rangeRecord.arrayZ[range].first = g;
rangeRecord.arrayZ[range].value = count;
}
rangeRecord[range].last = g;
rangeRecord.arrayZ[range].last = g;
last = g;
count++;
}
if (unlikely (unsorted))
rangeRecord.as_array ().qsort (RangeRecord<Types>::cmp_range);
return_trace (true);
}
@ -115,7 +122,7 @@ struct CoverageFormat2_4
{
if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
@ -185,8 +192,8 @@ struct CoverageFormat2_4
if (__more__ ())
{
unsigned int old = coverage;
j = c->rangeRecord[i].first;
coverage = c->rangeRecord[i].value;
j = c->rangeRecord.arrayZ[i].first;
coverage = c->rangeRecord.arrayZ[i].value;
if (unlikely (coverage != old + 1))
{
/* Broken table. Skip. Important to avoid DoS.


@ -51,6 +51,18 @@ struct RangeRecord
int cmp (hb_codepoint_t g) const
{ return g < first ? -1 : g <= last ? 0 : +1; }
HB_INTERNAL static int cmp_range (const void *pa, const void *pb) {
const RangeRecord *a = (const RangeRecord *) pa;
const RangeRecord *b = (const RangeRecord *) pb;
if (a->first < b->first) return -1;
if (a->first > b->first) return +1;
if (a->last < b->last) return -1;
if (a->last > b->last) return +1;
if (a->value < b->value) return -1;
if (a->value > b->value) return +1;
return 0;
}
unsigned get_population () const
{
if (unlikely (last < first)) return 0;


@ -29,9 +29,10 @@
#ifndef OT_LAYOUT_GDEF_GDEF_HH
#define OT_LAYOUT_GDEF_GDEF_HH
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-var-common.hh"
#include "../../../hb-font.hh"
#include "../../../hb-cache.hh"
namespace OT {
@ -48,8 +49,6 @@ struct AttachPoint : Array16Of<HBUINT16>
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
return_trace (out->serialize (c->serializer, + iter ()));
}
};
@ -201,22 +200,23 @@ struct CaretValueFormat3
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (!c->serializer->embed (caretValueFormat)) return_trace (false);
if (!c->serializer->embed (coordinate)) return_trace (false);
unsigned varidx = (this+deviceTable).get_variation_index ();
if (c->plan->layout_variation_idx_delta_map.has (varidx))
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (varidx, &new_varidx_delta))
return_trace (false);
uint32_t new_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (varidx));
if (delta != 0)
{
if (!c->serializer->check_assign (out->coordinate, coordinate + delta, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
if (!c->serializer->check_assign (out->coordinate, coordinate + delta, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
if (c->plan->all_axes_pinned)
if (new_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
return_trace (c->serializer->check_assign (out->caretValueFormat, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (deviceTable))
@ -441,6 +441,16 @@ struct MarkGlyphSetsFormat1
bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{ return (this+coverage[set_index]).get_coverage (glyph_id) != NOT_COVERED; }
template <typename set_t>
void collect_coverage (hb_vector_t<set_t> &sets) const
{
for (const auto &offset : coverage)
{
const auto &cov = this+offset;
cov.collect_coverage (sets.push ());
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -494,6 +504,15 @@ struct MarkGlyphSets
}
}
template <typename set_t>
void collect_coverage (hb_vector_t<set_t> &sets) const
{
switch (u.format) {
case 1: u.format1.collect_coverage (sets); return;
default:return;
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -585,6 +604,26 @@ struct GDEFVersion1_2
(version.to_int () < 0x00010003u || varStore.sanitize (c, this)));
}
static void remap_varidx_after_instantiation (const hb_map_t& varidx_map,
hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>>& layout_variation_idx_delta_map /* IN/OUT */)
{
/* varidx_map is empty which means varstore is empty after instantiation,
* no variations, map all varidx to HB_OT_LAYOUT_NO_VARIATIONS_INDEX.
* varidx_map doesn't have original varidx, indicating delta row is all
* zeros, map varidx to HB_OT_LAYOUT_NO_VARIATIONS_INDEX */
for (auto _ : layout_variation_idx_delta_map.iter_ref ())
{
/* old_varidx->(varidx, delta) mapping generated for subsetting, then this
* varidx is used as key of varidx_map during instantiation */
uint32_t varidx = _.second.first;
uint32_t *new_varidx;
if (varidx_map.has (varidx, &new_varidx))
_.second.first = *new_varidx;
else
_.second.first = HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
}
}
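The remap added here runs after instantiation rebuilds the variation store: every subsetting-time entry old_varidx -> (varidx, delta) has its middle component translated through varidx_map, and any index the rebuilt store no longer contains (an all-zero delta row) collapses to HB_OT_LAYOUT_NO_VARIATIONS_INDEX. A small worked example with made-up indices:

// varidx_map produced by instantiation: { 0x50003 -> 0x20001 }
// layout_variation_idx_delta_map, before -> after:
//   7 -> (0x50003, +12)   becomes   7 -> (0x20001, +12)             // remapped
//   9 -> (0x60002,  -3)   becomes   9 -> (NO_VARIATIONS_INDEX, -3)  // row dropped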
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@ -607,6 +646,22 @@ struct GDEFVersion1_2
{
if (c->plan->all_axes_pinned)
out->varStore = 0;
else if (c->plan->normalized_coords)
{
if (varStore)
{
item_variations_t item_vars;
if (item_vars.instantiate (this+varStore, c->plan, true, true,
c->plan->gdef_varstore_inner_maps.as_array ()))
subset_varstore = out->varStore.serialize_serialize (c->serializer,
item_vars.has_long_word (),
c->plan->axis_tags,
item_vars.get_region_list (),
item_vars.get_vardata_encodings ());
remap_varidx_after_instantiation (item_vars.get_varidx_map (),
c->plan->layout_variation_idx_delta_map);
}
}
else
subset_varstore = out->varStore.serialize_subset (c, varStore, this, c->plan->gdef_varstore_inner_maps.as_array ());
}
@ -858,27 +913,79 @@ struct GDEF
hb_blob_destroy (table.get_blob ());
table = hb_blob_get_empty ();
}
#ifndef HB_NO_GDEF_CACHE
table->get_mark_glyph_sets ().collect_coverage (mark_glyph_set_digests);
#endif
}
~accelerator_t () { table.destroy (); }
unsigned int get_glyph_props (hb_codepoint_t glyph) const
{
unsigned v;
#ifndef HB_NO_GDEF_CACHE
if (glyph_props_cache.get (glyph, &v))
return v;
#endif
v = table->get_glyph_props (glyph);
#ifndef HB_NO_GDEF_CACHE
if (likely (table.get_blob ())) // Don't try setting if we are the null instance!
glyph_props_cache.set (glyph, v);
#endif
return v;
}
bool mark_set_covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{
return
#ifndef HB_NO_GDEF_CACHE
mark_glyph_set_digests[set_index].may_have (glyph_id) &&
#endif
table->mark_set_covers (set_index, glyph_id);
}
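mark_set_covers now consults a per-set digest before the table. A set digest is a small bloom-filter-like summary that can answer "definitely not present" cheaply and never gives a false negative, so only a "maybe" pays for the exact coverage lookup. A self-contained sketch of the pattern (illustrative types, not hb_set_digest_t):

#include <cstdint>
#include <vector>

struct Digest
{
  uint64_t bits = 0;
  void add (uint32_t g)            { bits |= 1ull << (g & 63); }
  bool may_have (uint32_t g) const { return bits & (1ull << (g & 63)); }
};

static bool covers (const std::vector<Digest> &digests,
                    const std::vector<std::vector<uint32_t>> &sets,
                    unsigned set_index, uint32_t gid)
{
  if (!digests[set_index].may_have (gid)) return false;          // fast negative
  for (uint32_t g : sets[set_index]) if (g == gid) return true;  // exact check
  return false;
}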
hb_blob_ptr_t<GDEF> table;
#ifndef HB_NO_GDEF_CACHE
hb_vector_t<hb_set_digest_t> mark_glyph_set_digests;
mutable hb_cache_t<21, 3, 8> glyph_props_cache;
#endif
};
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{ get_lig_caret_list ().collect_variation_indices (c); }
void remap_layout_variation_indices (const hb_set_t *layout_variation_indices,
const hb_vector_t<int>& normalized_coords,
bool calculate_delta, /* not pinned at default */
bool no_variations, /* all axes pinned */
hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map /* OUT */) const
{
if (!has_var_store ()) return;
if (layout_variation_indices->is_empty ()) return;
const VariationStore &var_store = get_var_store ();
float *store_cache = var_store.create_cache ();
unsigned new_major = 0, new_minor = 0;
unsigned last_major = (layout_variation_indices->get_min ()) >> 16;
for (unsigned idx : layout_variation_indices->iter ())
{
int delta = 0;
if (calculate_delta)
delta = roundf (var_store.get_delta (idx, normalized_coords.arrayZ,
normalized_coords.length, store_cache));
if (no_variations)
{
layout_variation_idx_delta_map->set (idx, hb_pair_t<unsigned, int> (HB_OT_LAYOUT_NO_VARIATIONS_INDEX, delta));
continue;
}
uint16_t major = idx >> 16;
if (major >= get_var_store ().get_sub_table_count ()) break;
if (major >= var_store.get_sub_table_count ()) break;
if (major != last_major)
{
new_minor = 0;
@ -886,14 +993,11 @@ struct GDEF
}
unsigned new_idx = (new_major << 16) + new_minor;
if (!layout_variation_idx_delta_map->has (idx))
continue;
int delta = hb_second (layout_variation_idx_delta_map->get (idx));
layout_variation_idx_delta_map->set (idx, hb_pair_t<unsigned, int> (new_idx, delta));
++new_minor;
last_major = major;
}
var_store.destroy_cache (store_cache);
}
protected:


@ -25,7 +25,9 @@ struct AnchorFormat3
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this));
if (unlikely (!c->check_struct (this))) return_trace (false);
return_trace (xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this));
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED,
@ -35,9 +37,9 @@ struct AnchorFormat3
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
if (font->x_ppem || font->num_coords)
if ((font->x_ppem || font->num_coords) && xDeviceTable.sanitize (&c->sanitizer, this))
*x += (this+xDeviceTable).get_x_delta (font, c->var_store, c->var_store_cache);
if (font->y_ppem || font->num_coords)
if ((font->y_ppem || font->num_coords) && yDeviceTable.sanitize (&c->sanitizer, this))
*y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache);
}
@ -45,15 +47,19 @@ struct AnchorFormat3
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->embed (format))) return_trace (false);
if (unlikely (!c->serializer->embed (xCoordinate))) return_trace (false);
if (unlikely (!c->serializer->embed (yCoordinate))) return_trace (false);
unsigned x_varidx = xDeviceTable ? (this+xDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (x_varidx))
if (x_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (x_varidx));
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (x_varidx, &new_varidx_delta))
return_trace (false);
x_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->xCoordinate, xCoordinate + delta,
@ -63,9 +69,14 @@ struct AnchorFormat3
}
unsigned y_varidx = yDeviceTable ? (this+yDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (y_varidx))
if (y_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (y_varidx));
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (y_varidx, &new_varidx_delta))
return_trace (false);
y_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->yCoordinate, yCoordinate + delta,
@ -74,7 +85,10 @@ struct AnchorFormat3
}
}
if (c->plan->all_axes_pinned)
/* in case that all axes are pinned or no variations after instantiation,
* both var_idxes will be mapped to HB_OT_LAYOUT_NO_VARIATIONS_INDEX */
if (x_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX &&
y_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
return_trace (c->serializer->check_assign (out->format, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (xDeviceTable)) return_trace (false);


@ -21,18 +21,25 @@ struct AnchorMatrix
if (unlikely (hb_unsigned_mul_overflows (rows, cols))) return_trace (false);
unsigned int count = rows * cols;
if (!c->check_array (matrixZ.arrayZ, count)) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
for (unsigned int i = 0; i < count; i++)
if (!matrixZ[i].sanitize (c, this)) return_trace (false);
return_trace (true);
}
const Anchor& get_anchor (unsigned int row, unsigned int col,
const Anchor& get_anchor (hb_ot_apply_context_t *c,
unsigned int row, unsigned int col,
unsigned int cols, bool *found) const
{
*found = false;
if (unlikely (row >= rows || col >= cols)) return Null (Anchor);
*found = !matrixZ[row * cols + col].is_null ();
return this+matrixZ[row * cols + col];
auto &offset = matrixZ[row * cols + col];
if (unlikely (!offset.sanitize (&c->sanitizer, this))) return Null (Anchor);
*found = !offset.is_null ();
return this+offset;
}
template <typename Iterator,
@ -58,14 +65,15 @@ struct AnchorMatrix
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->rows = num_rows;
bool ret = false;
for (const unsigned i : index_iter)
{
auto *offset = c->serializer->embed (matrixZ[i]);
if (!offset) return_trace (false);
offset->serialize_subset (c, matrixZ[i], this);
ret |= offset->serialize_subset (c, matrixZ[i], this);
}
return_trace (true);
return_trace (ret);
}
};


@ -24,16 +24,17 @@ struct EntryExitRecord
(src_base+exitAnchor).collect_variation_indices (c);
}
EntryExitRecord* subset (hb_subset_context_t *c,
const void *src_base) const
bool subset (hb_subset_context_t *c,
const void *src_base) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (nullptr);
if (unlikely (!out)) return_trace (false);
out->entryAnchor.serialize_subset (c, entryAnchor, src_base);
out->exitAnchor.serialize_subset (c, exitAnchor, src_base);
return_trace (out);
bool ret = false;
ret |= out->entryAnchor.serialize_subset (c, entryAnchor, src_base);
ret |= out->exitAnchor.serialize_subset (c, exitAnchor, src_base);
return_trace (ret);
}
protected:
@ -91,7 +92,13 @@ struct CursivePosFormat1
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && entryExitRecord.sanitize (c, this));
if (unlikely (!coverage.sanitize (c, this)))
return_trace (false);
if (c->lazy_some_gpos)
return_trace (entryExitRecord.sanitize_shallow (c));
else
return_trace (entryExitRecord.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
@ -119,19 +126,21 @@ struct CursivePosFormat1
hb_buffer_t *buffer = c->buffer;
const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)];
if (!this_record.entryAnchor) return_trace (false);
if (!this_record.entryAnchor ||
unlikely (!this_record.entryAnchor.sanitize (&c->sanitizer, this))) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)];
if (!prev_record.exitAnchor)
if (!prev_record.exitAnchor ||
unlikely (!prev_record.exitAnchor.sanitize (&c->sanitizer, this)))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
@ -200,8 +209,8 @@ struct CursivePosFormat1
* Arabic. */
unsigned int child = i;
unsigned int parent = j;
hb_position_t x_offset = entry_x - exit_x;
hb_position_t y_offset = entry_y - exit_y;
hb_position_t x_offset = roundf (entry_x - exit_x);
hb_position_t y_offset = roundf (entry_y - exit_y);
if (!(c->lookup_props & LookupFlag::RightToLeft))
{
unsigned int k = child;
@ -278,7 +287,6 @@ struct CursivePosFormat1
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
auto it =
+ hb_zip (this+coverage, entryExitRecord)


@ -156,7 +156,7 @@ GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer)
{
for (unsigned i = 0; i < len; i++)
if (unlikely (pos[i].y_offset))
pos[i].x_offset += _hb_roundf (font->slant_xy * pos[i].y_offset);
pos[i].x_offset += roundf (font->slant_xy * pos[i].y_offset);
}
}


@ -27,6 +27,7 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
{
@ -38,13 +39,13 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
;
matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
ret |= matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
}
return_trace (this->len);
return_trace (ret);
}
};


@ -28,7 +28,7 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
const Anchor& mark_anchor = this + record.markAnchor;
bool found;
const Anchor& glyph_anchor = anchors.get_anchor (glyph_index, mark_class, class_count, &found);
const Anchor& glyph_anchor = anchors.get_anchor (c, glyph_index, mark_class, class_count, &found);
/* If this subtable doesn't have an anchor for this base and this class,
* return false such that the subsequent subtables have a chance at it. */
if (unlikely (!found)) return_trace (false);
@ -82,10 +82,10 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
| hb_map (hb_second)
;
bool ret = false;
unsigned new_length = 0;
for (const auto& mark_record : mark_iter) {
if (unlikely (!mark_record.subset (c, this, klass_mapping)))
return_trace (false);
ret |= mark_record.subset (c, this, klass_mapping);
new_length++;
}
@ -93,7 +93,7 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
HB_SERIALIZE_ERROR_ARRAY_OVERFLOW)))
return_trace (false);
return_trace (true);
return_trace (ret);
}
};


@ -197,9 +197,10 @@ struct MarkBasePosFormat1_2
if (!out->markCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned basecount = (this+baseArray).rows;
auto base_iter =
@ -228,11 +229,9 @@ struct MarkBasePosFormat1_2
;
}
out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
base_indexes.iter ());
return_trace (true);
return_trace (out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
base_indexes.iter ()));
}
};


@ -169,7 +169,7 @@ struct MarkLigPosFormat1_2
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
const hb_map_t &glyph_map = c->plan->glyph_map_gsub;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
@ -195,23 +195,24 @@ struct MarkLigPosFormat1_2
if (!out->markCoverage.serialize_serialize (c->serializer, new_mark_coverage))
return_trace (false);
out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
auto new_ligature_coverage =
+ hb_iter (this + ligatureCoverage)
| hb_filter (glyphset)
| hb_take ((this + ligatureArray).len)
| hb_map_retains_sorting (glyph_map)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage))
return_trace (false);
out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage), classCount, &klass_mapping);
return_trace (true);
return_trace (out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage),
classCount, &klass_mapping));
}
};


@ -100,16 +100,16 @@ struct MarkMarkPosFormat1_2
/* now we search backwards for a suitable mark glyph until a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
if (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx]))
if (likely (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx])))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
@ -183,9 +183,10 @@ struct MarkMarkPosFormat1_2
if (!out->mark1Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
out->mark1Array.serialize_subset (c, mark1Array, this,
(this+mark1Coverage).iter (),
&klass_mapping);
if (unlikely (!out->mark1Array.serialize_subset (c, mark1Array, this,
(this+mark1Coverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned mark2count = (this+mark2Array).rows;
auto mark2_iter =
@ -214,9 +215,10 @@ struct MarkMarkPosFormat1_2
;
}
out->mark2Array.serialize_subset (c, mark2Array, this, mark2_iter.len (), mark2_indexes.iter ());
return_trace (out->mark2Array.serialize_subset (c, mark2Array, this,
mark2_iter.len (),
mark2_indexes.iter ()));
return_trace (true);
}
};


@ -24,17 +24,16 @@ struct MarkRecord
return_trace (c->check_struct (this) && markAnchor.sanitize (c, base));
}
MarkRecord *subset (hb_subset_context_t *c,
const void *src_base,
const hb_map_t *klass_mapping) const
bool subset (hb_subset_context_t *c,
const void *src_base,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (nullptr);
if (unlikely (!out)) return_trace (false);
out->klass = klass_mapping->get (klass);
out->markAnchor.serialize_subset (c, markAnchor, src_base);
return_trace (out);
return_trace (out->markAnchor.serialize_subset (c, markAnchor, src_base));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,


@ -110,9 +110,9 @@ struct PairPosFormat1_3
if (likely (index == NOT_COVERED)) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);


@ -50,13 +50,13 @@ struct PairPosFormat2_4
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int stride = HBUINT16::static_size * (len1 + len2);
unsigned int record_size = valueFormat1.get_size () + valueFormat2.get_size ();
unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count;
return_trace (c->check_range ((const void *) values,
count,
record_size) &&
valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) &&
valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride));
stride) &&
(c->lazy_some_gpos ||
(valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) &&
valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride))));
}
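Several hunks in this change are gated on c->lazy_some_gpos: rather than validating every class1 x class2 value record (and each device table it references) when the font is loaded, the expensive checks are skipped and each offset is re-validated at the moment it is dereferenced. In sketch form, with the per-use half mirroring the get_device overload added later in this change:

// Eager: O(class1Count * class2Count) device-table checks at load time.
// Lazy:  a single range check at load, plus a per-use re-validation:
//
//   if (unlikely (!offset.sanitize (&c, base)))
//     return Null (Device);   // a bad offset degrades to a no-op device
//   return base + offset;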
bool intersects (const hb_set_t *glyphs) const
@ -131,40 +131,46 @@ struct PairPosFormat2_4
if (likely (index == NOT_COVERED)) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);
}
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
if (!klass2)
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int record_len = len1 + len2;
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
const Value *v = &values[record_len * (klass1 * class2Count + klass2)];
bool applied_first = false, applied_second = false;
/* Isolate simple kerning and apply it half to each side.
* Results in better cursor positinoing / underline drawing.
* Results in better cursor positioning / underline drawing.
*
* Disabled, because causes issues... :-(
* https://github.com/harfbuzz/harfbuzz/issues/3408
* https://github.com/harfbuzz/harfbuzz/pull/3235#issuecomment-1029814978
*/
#ifndef HB_SPLIT_KERN
if (0)
if (false)
#endif
{
if (!len2)
@ -224,8 +230,8 @@ struct PairPosFormat2_4
c->buffer->idx, skippy_iter.idx);
}
applied_first = valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
applied_first = len1 && valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = len2 && valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
@ -293,11 +299,13 @@ struct PairPosFormat2_4
out->valueFormat2 = out->valueFormat2.drop_device_table_flags ();
}
unsigned total_len = len1 + len2;
hb_vector_t<unsigned> class2_idxs (+ hb_range ((unsigned) class2Count) | hb_filter (klass2_map));
for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map))
{
for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map))
for (unsigned class2_idx : class2_idxs)
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2);
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * total_len;
valueFormat1.copy_values (c->serializer, out->valueFormat1, this, &values[idx], &c->plan->layout_variation_idx_delta_map);
valueFormat2.copy_values (c->serializer, out->valueFormat2, this, &values[idx + len1], &c->plan->layout_variation_idx_delta_map);
}


@ -52,8 +52,9 @@ struct PairSet
unsigned int count = len;
const PairValueRecord *record = &firstPairValueRecord;
return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride));
return_trace (c->lazy_some_gpos ||
(closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride)));
}
bool intersects (const hb_set_t *glyphs,
@ -120,8 +121,8 @@ struct PairSet
c->buffer->idx, pos);
}
bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
bool applied_first = len1 && valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = len2 && valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())


@ -22,7 +22,7 @@ struct PairValueRecord
ValueRecord values; /* Positioning data for the first glyph
* followed by for second glyph */
public:
DEFINE_SIZE_ARRAY (Types::size, values);
DEFINE_SIZE_ARRAY (Types::HBGlyphID::static_size, values);
int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }


@ -90,6 +90,7 @@ struct SinglePosFormat1
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
@ -100,7 +101,7 @@ struct SinglePosFormat1
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this, values, pos);
return true;


@ -94,6 +94,7 @@ struct SinglePosFormat2
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
@ -105,7 +106,7 @@ struct SinglePosFormat2
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this,
&values[index * valueFormat.get_len ()],


@ -118,21 +118,25 @@ struct ValueFormat : HBUINT16
auto *cache = c->var_store_cache;
/* pixel -> fractional pixel */
if (format & xPlaDevice) {
if (use_x_device) glyph_pos.x_offset += (base + get_device (values, &ret)).get_x_delta (font, store, cache);
if (format & xPlaDevice)
{
if (use_x_device) glyph_pos.x_offset += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yPlaDevice) {
if (use_y_device) glyph_pos.y_offset += (base + get_device (values, &ret)).get_y_delta (font, store, cache);
if (format & yPlaDevice)
{
if (use_y_device) glyph_pos.y_offset += get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
if (format & xAdvDevice) {
if (horizontal && use_x_device) glyph_pos.x_advance += (base + get_device (values, &ret)).get_x_delta (font, store, cache);
if (format & xAdvDevice)
{
if (horizontal && use_x_device) glyph_pos.x_advance += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yAdvDevice) {
if (format & yAdvDevice)
{
/* y_advance values grow downward but font-space grows upward, hence negation */
if (!horizontal && use_y_device) glyph_pos.y_advance -= (base + get_device (values, &ret)).get_y_delta (font, store, cache);
if (!horizontal && use_y_device) glyph_pos.y_advance -= get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
return ret;
@ -174,6 +178,9 @@ struct ValueFormat : HBUINT16
if (format & xAdvance) x_adv = copy_value (c, new_format, xAdvance, *values++);
if (format & yAdvance) y_adv = copy_value (c, new_format, yAdvance, *values++);
if (!has_device ())
return;
if (format & xPlaDevice)
{
add_delta_to_value (x_placement, base, values, layout_variation_idx_delta_map);
@ -233,14 +240,12 @@ struct ValueFormat : HBUINT16
if (format & ValueFormat::xAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
@ -277,10 +282,22 @@ struct ValueFormat : HBUINT16
{
return *static_cast<Offset16To<Device> *> (value);
}
static inline const Offset16To<Device>& get_device (const Value* value, bool *worked=nullptr)
static inline const Offset16To<Device>& get_device (const Value* value)
{
return *static_cast<const Offset16To<Device> *> (value);
}
static inline const Device& get_device (const Value* value,
bool *worked,
const void *base,
hb_sanitize_context_t &c)
{
if (worked) *worked |= bool (*value);
return *static_cast<const Offset16To<Device> *> (value);
auto &offset = *static_cast<const Offset16To<Device> *> (value);
if (unlikely (!offset.sanitize (&c, base)))
return Null(Device);
return base + offset;
}
void add_delta_to_value (HBINT16 *value,
@ -340,25 +357,26 @@ struct ValueFormat : HBUINT16
bool sanitize_value (hb_sanitize_context_t *c, const void *base, const Value *values) const
{
TRACE_SANITIZE (this);
return_trace (c->check_range (values, get_size ()) && (!has_device () || sanitize_value_devices (c, base, values)));
if (unlikely (!c->check_range (values, get_size ()))) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
return_trace (!has_device () || sanitize_value_devices (c, base, values));
}
bool sanitize_values (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count) const
{
TRACE_SANITIZE (this);
unsigned int len = get_len ();
unsigned size = get_size ();
if (!c->check_range (values, count, get_size ())) return_trace (false);
if (!c->check_range (values, count, size)) return_trace (false);
if (!has_device ()) return_trace (true);
if (c->lazy_some_gpos)
return_trace (true);
for (unsigned int i = 0; i < count; i++) {
if (!sanitize_value_devices (c, base, values))
return_trace (false);
values += len;
}
return_trace (true);
return_trace (sanitize_values_stride_unsafe (c, base, values, count, size));
}
/* Just sanitize referenced Device tables. Doesn't check the values themselves. */


@ -8,8 +8,6 @@ namespace OT {
namespace Layout {
namespace GSUB_impl {
typedef hb_pair_t<hb_codepoint_t, hb_codepoint_t> hb_codepoint_pair_t;
template<typename Iterator>
static void SingleSubst_serialize (hb_serialize_context_t *c,
Iterator it);


@ -10,10 +10,10 @@ namespace GSUB_impl {
template <typename Types>
struct Ligature
{
protected:
public:
typename Types::HBGlyphID
ligGlyph; /* GlyphID of ligature to substitute */
HeadlessArrayOf<typename Types::HBGlyphID>
HeadlessArray16Of<typename Types::HBGlyphID>
component; /* Array of component GlyphIDs--start
* with the second component--ordered
* in writing direction */


@ -75,12 +75,69 @@ struct LigatureSet
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int num_ligs = ligature.len;
#ifndef HB_NO_OT_RULESETS_FAST_PATH
if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 4)
#endif
{
slow:
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature.arrayZ[i];
if (lig.apply (c)) return_trace (true);
}
return_trace (false);
}
/* This version is optimized for speed by matching the first component
* of the ligature here, instead of calling into the ligation code.
*
* This is replicated in ChainRuleSet and RuleSet. */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (c->buffer->idx);
skippy_iter.set_match_func (match_always, nullptr);
skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
unsigned unsafe_to;
hb_codepoint_t first = (unsigned) -1;
bool matched = skippy_iter.next (&unsafe_to);
if (likely (matched))
{
first = c->buffer->info[skippy_iter.idx].codepoint;
unsafe_to = skippy_iter.idx + 1;
if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
{
/* Can't use the fast path if eg. the next char is a default-ignorable
* or other skippable. */
goto slow;
}
}
else
goto slow;
bool unsafe_to_concat = false;
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature[i];
if (lig.apply (c)) return_trace (true);
const auto &lig = this+ligature.arrayZ[i];
if (unlikely (lig.component.lenP1 <= 1) ||
lig.component.arrayZ[0] == first)
{
if (lig.apply (c))
{
if (unsafe_to_concat)
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (true);
}
}
else if (likely (lig.component.lenP1 > 1))
unsafe_to_concat = true;
}
if (likely (unsafe_to_concat))
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (false);
}
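The fast path added above peeks at the next input glyph once and rejects any ligature whose first stored component differs, instead of re-running the full matcher for every candidate. Note the indexing: a Ligature's coverage glyph is its first component, so component.arrayZ[0] is the second glyph of the ligature. A worked example with hypothetical input:

// Current glyph 'f' already matched this set's coverage; next glyph is 'i'.
// Candidates store components from the second glyph on:
//   f+f+i : component[0] = 'f'  -> rejected by the peek, full match skipped
//   f+i   : component[0] = 'i'  -> full apply attempted, succeeds
// Records with lenP1 <= 1 are always tried: there is nothing to pre-match.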


@ -191,7 +191,6 @@ struct ReverseChainSingleSubstFormat1
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->check_success (out))) return_trace (false);
if (unlikely (!c->serializer->embed (this->format))) return_trace (false);
if (unlikely (!c->serializer->embed (this->coverage))) return_trace (false);


@ -53,7 +53,7 @@ struct Sequence
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (multiple subtitution)",
"replaced glyph at %u (multiple substitution)",
c->buffer->idx - 1u);
}


@ -57,7 +57,7 @@ struct SingleSubst
#ifndef HB_NO_BEYOND_64K
if (+ glyphs
| hb_map_retains_sorting (hb_first)
| hb_map_retains_sorting (hb_second)
| hb_filter ([] (hb_codepoint_t gid) { return gid > 0xFFFFu; }))
{
format += 2;


@ -87,19 +87,54 @@ struct CompositeGlyphRecord
}
}
void transform_points (contour_point_vector_t &points,
static void transform (const float (&matrix)[4],
hb_array_t<contour_point_t> points)
{
if (matrix[0] != 1.f || matrix[1] != 0.f ||
matrix[2] != 0.f || matrix[3] != 1.f)
for (auto &point : points)
point.transform (matrix);
}
static void translate (const contour_point_t &trans,
hb_array_t<contour_point_t> points)
{
if (HB_OPTIMIZE_SIZE_VAL)
{
if (trans.x != 0.f || trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
}
else
{
if (trans.x != 0.f && trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
else
{
if (trans.x != 0.f)
for (auto &point : points)
point.x += trans.x;
else if (trans.y != 0.f)
for (auto &point : points)
point.y += trans.y;
}
}
}
void transform_points (hb_array_t<contour_point_t> points,
const float (&matrix)[4],
const contour_point_t &trans) const
{
if (scaled_offsets ())
{
points.translate (trans);
points.transform (matrix);
translate (trans, points);
transform (matrix, points);
}
else
{
points.transform (matrix);
points.translate (trans);
transform (matrix, points);
translate (trans, points);
}
}
@ -108,8 +143,8 @@ struct CompositeGlyphRecord
float matrix[4];
contour_point_t trans;
get_transformation (matrix, trans);
if (unlikely (!points.resize (points.length + 1))) return false;
points[points.length - 1] = trans;
if (unlikely (!points.alloc (points.length + 4))) return false; // For phantom points
points.push (trans);
return true;
}
@ -358,7 +393,7 @@ struct CompositeGlyph
{
/* last 4 points in points_with_deltas are phantom points and should not be included */
if (i >= points_with_deltas.length - 4) {
free (o);
hb_free (o);
return false;
}


@ -103,6 +103,63 @@ struct Glyph
}
}
bool get_all_points_without_var (const hb_face_t *face,
contour_point_vector_t &points /* OUT */) const
{
switch (type) {
case SIMPLE:
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points)))
return false;
break;
case COMPOSITE:
{
for (auto &item : get_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
for (auto &item : get_var_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#endif
case EMPTY:
break;
}
/* Init phantom points */
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
int lsb = 0;
int h_delta = face->table.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb) ?
(int) header->xMin - lsb : 0;
HB_UNUSED int tsb = 0;
int v_orig = (int) header->yMax +
#ifndef HB_NO_VERTICAL
((void) face->table.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb), tsb)
#else
0
#endif
;
unsigned h_adv = face->table.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
face->table.vmtx->get_advance_without_var_unscaled (gid)
#else
- face->get_upem ()
#endif
;
phantoms[PHANTOM_LEFT].x = h_delta;
phantoms[PHANTOM_RIGHT].x = (int) h_adv + h_delta;
phantoms[PHANTOM_TOP].y = v_orig;
phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
}
return true;
}
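The four phantom points appended by this new function encode the glyph's metrics as points, so the same delta machinery that moves outline points can retarget advances and side bearings too; callers recover the metrics from the phantoms afterwards. Under the assignments above, that read-back is roughly:

// h_advance = phantoms[PHANTOM_RIGHT].x - phantoms[PHANTOM_LEFT].x
// lsb       = xMin - phantoms[PHANTOM_LEFT].x
// v_advance = phantoms[PHANTOM_TOP].y  - phantoms[PHANTOM_BOTTOM].y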
void update_mtx (const hb_subset_plan_t *plan,
int xMin, int xMax,
int yMin, int yMax,
@ -114,8 +171,8 @@ struct Glyph
if (type != EMPTY)
{
plan->bounds_width_map.set (new_gid, xMax - xMin);
plan->bounds_height_map.set (new_gid, yMax - yMin);
plan->bounds_width_vec[new_gid] = xMax - xMin;
plan->bounds_height_vec[new_gid] = yMax - yMin;
}
unsigned len = all_points.length;
@ -124,10 +181,12 @@ struct Glyph
float topSideY = all_points[len - 2].y;
float bottomSideY = all_points[len - 1].y;
uint32_t hash = hb_hash (new_gid);
signed hori_aw = roundf (rightSideX - leftSideX);
if (hori_aw < 0) hori_aw = 0;
int lsb = roundf (xMin - leftSideX);
plan->hmtx_map.set (new_gid, hb_pair ((unsigned) hori_aw, lsb));
plan->hmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) hori_aw, lsb));
//flag value should be computed using non-empty glyphs
if (type != EMPTY && lsb != xMin)
plan->head_maxp_info.allXMinIsLsb = false;
@ -135,7 +194,7 @@ struct Glyph
signed vert_aw = roundf (topSideY - bottomSideY);
if (vert_aw < 0) vert_aw = 0;
int tsb = roundf (topSideY - yMax);
plan->vmtx_map.set (new_gid, hb_pair ((unsigned) vert_aw, tsb));
plan->vmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) vert_aw, tsb));
}
bool compile_header_bytes (const hb_subset_plan_t *plan,
@ -155,24 +214,28 @@ struct Glyph
{
xMin = xMax = all_points[0].x;
yMin = yMax = all_points[0].y;
unsigned count = all_points.length - 4;
for (unsigned i = 1; i < count; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
}
for (unsigned i = 1; i < all_points.length - 4; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
update_mtx (plan, roundf (xMin), roundf (xMax), roundf (yMin), roundf (yMax), all_points);
// These are destined for storage in a 16 bit field, so clamp the values to
// fit into a 16 bit signed integer.
int rounded_xMin = hb_clamp (roundf (xMin), -32768.0f, 32767.0f);
int rounded_xMax = hb_clamp (roundf (xMax), -32768.0f, 32767.0f);
int rounded_yMin = hb_clamp (roundf (yMin), -32768.0f, 32767.0f);
int rounded_yMax = hb_clamp (roundf (yMax), -32768.0f, 32767.0f);
int rounded_xMin = roundf (xMin);
int rounded_xMax = roundf (xMax);
int rounded_yMin = roundf (yMin);
int rounded_yMax = roundf (yMax);
update_mtx (plan, rounded_xMin, rounded_xMax, rounded_yMin, rounded_yMax, all_points);
if (type != EMPTY)
{
@ -287,6 +350,7 @@ struct Glyph
bool use_my_metrics = true,
bool phantom_only = false,
hb_array_t<int> coords = hb_array_t<int> (),
hb_map_t *current_glyphs = nullptr,
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
@ -296,6 +360,10 @@ struct Glyph
if (unlikely (*edge_count > HB_GLYF_MAX_EDGE_COUNT)) return false;
(*edge_count)++;
hb_map_t current_glyphs_stack;
if (current_glyphs == nullptr)
current_glyphs = &current_glyphs_stack;
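
current_glyphs threads the set of glyph ids on the current composite path through the recursion, so a component that refers back to an ancestor is skipped instead of looping until the edge-count limit trips. A small sketch of the same guard pattern, assuming an adjacency map in place of the glyf table:

#include <map>
#include <set>
#include <vector>

// Sketch: walk composite components, skipping ids already on the
// current path so reference cycles terminate.
static bool walk (unsigned gid,
                  const std::map<unsigned, std::vector<unsigned>> &components,
                  std::set<unsigned> &current)
{
  auto it = components.find (gid);
  if (it == components.end ()) return true;  // simple glyph
  for (unsigned child : it->second)
  {
    if (current.count (child)) continue;     // cycle: skip, like current_glyphs->has ()
    current.insert (child);
    bool ok = walk (child, components, current);
    current.erase (child);                   // mirrors current_glyphs->del ()
    if (!ok) return false;
  }
  return true;
}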
if (head_maxp_info)
{
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
@ -305,9 +373,8 @@ struct Glyph
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t stack_points;
bool inplace = type == SIMPLE && all_points.length == 0;
/* Load into all_points if it's empty, as an optimization. */
contour_point_vector_t &points = inplace ? all_points : stack_points;
contour_point_vector_t &points = type == SIMPLE ? all_points : stack_points;
unsigned old_length = points.length;
switch (type) {
case SIMPLE:
@ -315,7 +382,7 @@ struct Glyph
head_maxp_info->maxContours = hb_max (head_maxp_info->maxContours, (unsigned) header->numberOfContours);
if (depth > 0 && composite_contours)
*composite_contours += (unsigned) header->numberOfContours;
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only)))
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (all_points, phantom_only)))
return false;
break;
case COMPOSITE:
@ -329,6 +396,7 @@ struct Glyph
{
for (auto &item : get_var_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#endif
case EMPTY:
@ -365,9 +433,11 @@ struct Glyph
}
#ifndef HB_NO_VAR
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ());
if (coords)
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
phantom_only && type == SIMPLE);
#endif
// mainly used by CompositeGlyph when calculating new X/Y offset values, so no need to extend it
@ -375,27 +445,33 @@ struct Glyph
if (points_with_deltas != nullptr && depth == 0 && type == COMPOSITE)
{
if (unlikely (!points_with_deltas->resize (points.length))) return false;
points_with_deltas->copy_vector (points);
*points_with_deltas = points;
}
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, points.length - 4);
if (!inplace)
all_points.extend (points.as_array ());
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, all_points.length - old_length - 4);
break;
case COMPOSITE:
{
contour_point_vector_t comp_points;
unsigned int comp_index = 0;
for (auto &item : get_composite_iterator ())
{
comp_points.reset ();
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (current_glyphs->has (item_gid)))
continue;
current_glyphs->add (item_gid);
unsigned old_count = all_points.length;
if (unlikely ((!phantom_only || (use_my_metrics && item.is_use_my_metrics ())) &&
!glyf_accelerator.glyph_for_gid (item_gid)
.get_points (font,
glyf_accelerator,
comp_points,
all_points,
points_with_deltas,
head_maxp_info,
composite_contours,
@ -403,23 +479,32 @@ struct Glyph
use_my_metrics,
phantom_only,
coords,
current_glyphs,
depth + 1,
edge_count)))
{
current_glyphs->del (item_gid);
return false;
}
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
float matrix[4];
contour_point_t default_trans;
item.get_transformation (matrix, default_trans);
if (comp_points) // Empty in case of phantom_only
{
float matrix[4];
contour_point_t default_trans;
item.get_transformation (matrix, default_trans);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[comp_index]);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[comp_index]);
}
if (item.is_anchored ())
if (item.is_anchored () && !phantom_only)
{
unsigned int p1, p2;
item.get_anchor_points (p1, p2);
@ -429,16 +514,20 @@ struct Glyph
delta.init (all_points[p1].x - comp_points[p2].x,
all_points[p1].y - comp_points[p2].y);
comp_points.translate (delta);
item.translate (delta, comp_points);
}
}
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
all_points.resize (all_points.length - PHANTOM_COUNT);
if (all_points.length > HB_GLYF_MAX_POINTS)
{
current_glyphs->del (item_gid);
return false;
}
comp_index++;
current_glyphs->del (item_gid);
}
if (head_maxp_info && depth == 0)
@ -453,26 +542,37 @@ struct Glyph
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
contour_point_vector_t comp_points;
hb_array_t<contour_point_t> points_left = points.as_array ();
for (auto &item : get_var_composite_iterator ())
{
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (current_glyphs->has (item_gid)))
continue;
current_glyphs->add (item_gid);
unsigned item_num_points = item.get_num_points ();
hb_array_t<contour_point_t> record_points = points_left.sub_array (0, item_num_points);
comp_points.reset ();
assert (record_points.length == item_num_points);
auto component_coords = coords;
if (item.is_reset_unspecified_axes ())
/* Copying coords is expensive, so we have put an arbitrary
* limit on the max number of coords for now. */
if (item.is_reset_unspecified_axes () ||
coords.length > HB_GLYF_VAR_COMPOSITE_MAX_AXES)
component_coords = hb_array<int> ();
coord_setter_t coord_setter (component_coords);
item.set_variations (coord_setter, record_points);
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
unsigned old_count = all_points.length;
if (unlikely ((!phantom_only || (use_my_metrics && item.is_use_my_metrics ())) &&
!glyf_accelerator.glyph_for_gid (item_gid)
.get_points (font,
glyf_accelerator,
comp_points,
all_points,
points_with_deltas,
head_maxp_info,
nullptr,
@ -480,24 +580,36 @@ struct Glyph
use_my_metrics,
phantom_only,
coord_setter.get_coords (),
current_glyphs,
depth + 1,
edge_count)))
{
current_glyphs->del (item_gid);
return false;
}
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Apply component transformation */
item.transform_points (record_points, comp_points);
if (comp_points) // Empty in case of phantom_only
item.transform_points (record_points, comp_points);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
all_points.resize (all_points.length - PHANTOM_COUNT);
if (all_points.length > HB_GLYF_MAX_POINTS)
{
current_glyphs->del (item_gid);
return false;
}
points_left += item_num_points;
current_glyphs->del (item_gid);
}
all_points.extend (phantoms);
} break;
@ -512,9 +624,10 @@ struct Glyph
/* Undocumented rasterizer behavior:
* Shift points horizontally by the updated left side bearing
*/
contour_point_t delta;
delta.init (-phantoms[PHANTOM_LEFT].x, 0.f);
if (delta.x) all_points.translate (delta);
int v = -phantoms[PHANTOM_LEFT].x;
if (v)
for (auto &point : all_points)
point.x += v;
}
return !all_points.in_error ();
@ -545,10 +658,11 @@ struct Glyph
int num_contours = header->numberOfContours;
if (unlikely (num_contours == 0)) type = EMPTY;
else if (num_contours > 0) type = SIMPLE;
else if (num_contours == -1) type = COMPOSITE;
#ifndef HB_NO_VAR_COMPOSITES
else if (num_contours == -2) type = VAR_COMPOSITE;
#endif
else type = COMPOSITE; /* negative numbers */
else type = EMPTY; // Spec deviation; Spec says COMPOSITE, but not seen in the wild.
}
protected:

View File

@ -124,7 +124,7 @@ struct SimpleGlyph
}
static bool read_flags (const HBUINT8 *&p /* IN/OUT */,
contour_point_vector_t &points_ /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end)
{
unsigned count = points_.length;
@ -146,7 +146,7 @@ struct SimpleGlyph
}
static bool read_points (const HBUINT8 *&p /* IN/OUT */,
contour_point_vector_t &points_ /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end,
float contour_point_t::*m,
const simple_glyph_flag_t short_flag,
@ -154,10 +154,9 @@ struct SimpleGlyph
{
int v = 0;
unsigned count = points_.length;
for (unsigned i = 0; i < count; i++)
for (auto &point : points_)
{
unsigned flag = points_[i].flag;
unsigned flag = point.flag;
if (flag & short_flag)
{
if (unlikely (p + 1 > end)) return false;
@ -175,23 +174,27 @@ struct SimpleGlyph
p += HBINT16::static_size;
}
}
points_.arrayZ[i].*m = v;
point.*m = v;
}
return true;
}
bool get_contour_points (contour_point_vector_t &points_ /* OUT */,
bool get_contour_points (contour_point_vector_t &points /* OUT */,
bool phantom_only = false) const
{
const HBUINT16 *endPtsOfContours = &StructAfter<HBUINT16> (header);
int num_contours = header.numberOfContours;
assert (num_contours);
assert (num_contours > 0);
/* One extra item at the end, for the instruction-count below. */
if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours]))) return false;
unsigned int num_points = endPtsOfContours[num_contours - 1] + 1;
points_.alloc (num_points + 4, true); // Allocate for phantom points, to avoid a possible copy
if (!points_.resize (num_points)) return false;
unsigned old_length = points.length;
points.alloc (points.length + num_points + 4, true); // Allocate for phantom points, to avoid a possible copy
if (unlikely (!points.resize (points.length + num_points, false))) return false;
auto points_ = points.as_array ().sub_array (old_length);
if (!phantom_only)
hb_memset (points_.arrayZ, 0, sizeof (contour_point_t) * num_points);
if (phantom_only) return true;
for (int i = 0; i < num_contours; i++)
@ -214,7 +217,7 @@ struct SimpleGlyph
}
static void encode_coord (int value,
uint8_t &flag,
unsigned &flag,
const simple_glyph_flag_t short_flag,
const simple_glyph_flag_t same_flag,
hb_vector_t<uint8_t> &coords /* OUT */)
@ -239,9 +242,9 @@ struct SimpleGlyph
}
}
static void encode_flag (uint8_t &flag,
uint8_t &repeat,
uint8_t lastflag,
static void encode_flag (unsigned flag,
unsigned &repeat,
unsigned lastflag,
hb_vector_t<uint8_t> &flags /* OUT */)
{
if (flag == lastflag && repeat != 255)
@ -262,7 +265,7 @@ struct SimpleGlyph
else
{
repeat = 0;
flags.push (flag);
flags.arrayZ[flags.length++] = flag;
}
}
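
encode_flag implements the glyf run-length scheme: the first repetition sets the repeat bit on the previous flag byte and appends a count byte, and further repetitions only bump that count, capped at 255. A standalone sketch of the scheme, assuming the spec's repeat-bit value of 0x08:

#include <cstdint>
#include <initializer_list>
#include <vector>

constexpr uint8_t FLAG_REPEAT = 0x08;

static void encode_flag (uint8_t flag, unsigned &repeat, unsigned &lastflag,
                         std::vector<uint8_t> &flags)
{
  if (flag == lastflag && repeat != 255)
  {
    repeat++;
    if (repeat == 1)
    {
      flags.back () |= FLAG_REPEAT;  // re-tag the previous flag byte
      flags.push_back (1);           // ...and start its count byte
    }
    else
      flags.back () = (uint8_t) repeat;
  }
  else
  {
    repeat = 0;
    flags.push_back (flag);
    lastflag = flag;
  }
}

int main ()
{
  std::vector<uint8_t> flags;
  unsigned repeat = 0, lastflag = 256;  // 256 matches no byte on the first call
  for (uint8_t f : {6, 6, 6, 1})
    encode_flag (f, repeat, lastflag, flags);
  // flags == {6 | FLAG_REPEAT, 2, 1}
  return 0;
}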
@ -282,13 +285,13 @@ struct SimpleGlyph
if (unlikely (!x_coords.alloc (2*num_points, true))) return false;
if (unlikely (!y_coords.alloc (2*num_points, true))) return false;
uint8_t lastflag = 255, repeat = 0;
unsigned lastflag = 255, repeat = 0;
int prev_x = 0, prev_y = 0;
for (unsigned i = 0; i < num_points; i++)
{
uint8_t flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE + FLAG_OVERLAP_SIMPLE;
unsigned flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE | FLAG_OVERLAP_SIMPLE | FLAG_CUBIC;
int cur_x = roundf (all_points.arrayZ[i].x);
int cur_y = roundf (all_points.arrayZ[i].y);

View File

@ -22,7 +22,7 @@ struct SubsetGlyph
bool serialize (hb_serialize_context_t *c,
bool use_short_loca,
const hb_subset_plan_t *plan)
const hb_subset_plan_t *plan) const
{
TRACE_SERIALIZE (this);
@ -40,7 +40,7 @@ struct SubsetGlyph
pad = 0;
while (pad_length > 0)
{
c->embed (pad);
(void) c->embed (pad);
pad_length--;
}

View File

@ -36,24 +36,21 @@ struct VarCompositeGlyphRecord
unsigned int get_size () const
{
unsigned fl = flags;
unsigned int size = min_size;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 4 : 3;
unsigned axis_width = (fl & AXIS_INDICES_ARE_SHORT) ? 4 : 3;
size += numAxes * axis_width;
// gid
size += 2;
if (flags & GID_IS_24BIT) size += 1;
if (fl & GID_IS_24BIT) size += 1;
if (flags & HAVE_TRANSLATE_X) size += 2;
if (flags & HAVE_TRANSLATE_Y) size += 2;
if (flags & HAVE_ROTATION) size += 2;
if (flags & HAVE_SCALE_X) size += 2;
if (flags & HAVE_SCALE_Y) size += 2;
if (flags & HAVE_SKEW_X) size += 2;
if (flags & HAVE_SKEW_Y) size += 2;
if (flags & HAVE_TCENTER_X) size += 2;
if (flags & HAVE_TCENTER_Y) size += 2;
// 2 bytes each for the following flags
fl = fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y |
HAVE_ROTATION |
HAVE_SCALE_X | HAVE_SCALE_Y |
HAVE_SKEW_X | HAVE_SKEW_Y |
HAVE_TCENTER_X | HAVE_TCENTER_Y);
size += hb_popcount (fl) * 2;
return size;
}
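
This rewrite replaces nine branchy flag tests with one mask-and-popcount, which works because every optional transform field is exactly two bytes wide. A tiny illustration of the idiom, assuming made-up bit positions and __builtin_popcount in place of hb_popcount:

#include <cstdio>

enum : unsigned {
  HAVE_TRANSLATE_X = 1u << 0,  // illustration values only
  HAVE_TRANSLATE_Y = 1u << 1,
  HAVE_ROTATION    = 1u << 2,
};

int main ()
{
  unsigned fl = HAVE_TRANSLATE_X | HAVE_ROTATION;
  unsigned mask = HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y | HAVE_ROTATION;
  unsigned size = __builtin_popcount (fl & mask) * 2;  // 2 bytes per present field
  printf ("%u\n", size);  // prints 4
  return 0;
}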
@ -66,17 +63,17 @@ struct VarCompositeGlyphRecord
hb_codepoint_t get_gid () const
{
if (flags & GID_IS_24BIT)
return StructAfter<const HBGlyphID24> (numAxes);
return * (const HBGlyphID24 *) &pad;
else
return StructAfter<const HBGlyphID16> (numAxes);
return * (const HBGlyphID16 *) &pad;
}
void set_gid (hb_codepoint_t gid)
{
if (flags & GID_IS_24BIT)
StructAfter<HBGlyphID24> (numAxes) = gid;
* (HBGlyphID24 *) &pad = gid;
else
StructAfter<HBGlyphID16> (numAxes) = gid;
* (HBGlyphID16 *) &pad = gid;
}
unsigned get_numAxes () const
@ -86,26 +83,44 @@ struct VarCompositeGlyphRecord
unsigned get_num_points () const
{
unsigned fl = flags;
unsigned num = 0;
if (flags & AXES_HAVE_VARIATION) num += numAxes;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y)) num++;
if (flags & HAVE_ROTATION) num++;
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y)) num++;
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y)) num++;
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y)) num++;
if (fl & AXES_HAVE_VARIATION) num += numAxes;
/* Hopefully faster code, relying on the value of the flags. */
fl = (((fl & (HAVE_TRANSLATE_Y | HAVE_SCALE_Y | HAVE_SKEW_Y | HAVE_TCENTER_Y)) >> 1) | fl) &
(HAVE_TRANSLATE_X | HAVE_ROTATION | HAVE_SCALE_X | HAVE_SKEW_X | HAVE_TCENTER_X);
num += hb_popcount (fl);
return num;
/* Slower but more readable code. */
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y)) num++;
if (fl & HAVE_ROTATION) num++;
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y)) num++;
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y)) num++;
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y)) num++;
return num;
}
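
The branch-free path relies on each HAVE_*_Y flag sitting one bit above its HAVE_*_X partner: shifting the Y bits down and OR-ing them in lets a single popcount over the X/rotation bits count one point per used pair. A worked illustration under that bit-layout assumption:

#include <cstdio>

// Assumed layout: each *_Y flag is its *_X flag shifted left by one.
enum : unsigned {
  HAVE_TRANSLATE_X = 0x0008, HAVE_TRANSLATE_Y = 0x0010,
  HAVE_ROTATION    = 0x0020,
  HAVE_SCALE_X     = 0x0040, HAVE_SCALE_Y     = 0x0080,
};

static unsigned num_transform_points (unsigned fl)
{
  // Fold each Y bit onto its X partner, then count one point per
  // surviving X/rotation bit -- one popcount instead of five tests.
  fl = (((fl & (HAVE_TRANSLATE_Y | HAVE_SCALE_Y)) >> 1) | fl) &
       (HAVE_TRANSLATE_X | HAVE_ROTATION | HAVE_SCALE_X);
  return __builtin_popcount (fl);
}

int main ()
{
  // Translate-Y alone still needs a translate point: it folds onto X.
  printf ("%u\n", num_transform_points (HAVE_TRANSLATE_Y | HAVE_ROTATION));  // 2
  return 0;
}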
void transform_points (hb_array_t<contour_point_t> record_points,
contour_point_vector_t &points) const
void transform_points (hb_array_t<const contour_point_t> record_points,
hb_array_t<contour_point_t> points) const
{
float matrix[4];
contour_point_t trans;
get_transformation_from_points (record_points, matrix, trans);
get_transformation_from_points (record_points.arrayZ, matrix, trans);
points.transform (matrix);
points.translate (trans);
auto arrayZ = points.arrayZ;
unsigned count = points.length;
if (matrix[0] != 1.f || matrix[1] != 0.f ||
matrix[2] != 0.f || matrix[3] != 1.f)
for (unsigned i = 0; i < count; i++)
arrayZ[i].transform (matrix);
if (trans.x != 0.f || trans.y != 0.f)
for (unsigned i = 0; i < count; i++)
arrayZ[i].translate (trans);
}
static inline void transform (float (&matrix)[4], contour_point_t &trans,
@ -136,26 +151,41 @@ struct VarCompositeGlyphRecord
static void translate (float (&matrix)[4], contour_point_t &trans,
float translateX, float translateY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L213
float other[6] = {1.f, 0.f, 0.f, 1.f, translateX, translateY};
transform (matrix, trans, other);
if (!translateX && !translateY)
return;
trans.x += matrix[0] * translateX + matrix[2] * translateY;
trans.y += matrix[1] * translateX + matrix[3] * translateY;
}
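
The specialized translate drops the general 2x3 composition it replaced: post-composing a pure translation (tx, ty) only moves the offset, by the matrix applied to (tx, ty). A small check of that identity, assuming the same matrix[4]-plus-offset representation:

#include <cassert>

static void translate (float (&matrix)[4], float (&trans)[2],
                       float tx, float ty)
{
  trans[0] += matrix[0] * tx + matrix[2] * ty;
  trans[1] += matrix[1] * tx + matrix[3] * ty;
}

int main ()
{
  float m[4] = {2.f, 0.f, 0.f, 3.f};  // scale x by 2, y by 3
  float t[2] = {1.f, 1.f};
  translate (m, t, 5.f, 7.f);
  assert (t[0] == 11.f && t[1] == 22.f);  // 1 + 2*5, 1 + 3*7
  return 0;
}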
static void scale (float (&matrix)[4], contour_point_t &trans,
float scaleX, float scaleY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L224
float other[6] = {scaleX, 0.f, 0.f, scaleY, 0.f, 0.f};
transform (matrix, trans, other);
if (scaleX == 1.f && scaleY == 1.f)
return;
matrix[0] *= scaleX;
matrix[1] *= scaleX;
matrix[2] *= scaleY;
matrix[3] *= scaleY;
}
static void rotate (float (&matrix)[4], contour_point_t &trans,
float rotation)
{
if (!rotation)
return;
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L240
rotation = rotation * HB_PI;
float c = cosf (rotation);
float s = sinf (rotation);
float c;
float s;
#ifdef HAVE_SINCOSF
sincosf (rotation, &s, &c);
#else
c = cosf (rotation);
s = sinf (rotation);
#endif
float other[6] = {c, s, -s, c, 0.f, 0.f};
transform (matrix, trans, other);
}
@ -163,101 +193,100 @@ struct VarCompositeGlyphRecord
static void skew (float (&matrix)[4], contour_point_t &trans,
float skewX, float skewY)
{
if (!skewX && !skewY)
return;
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L255
skewX = skewX * HB_PI;
skewY = skewY * HB_PI;
float other[6] = {1.f, tanf (skewY), tanf (skewX), 1.f, 0.f, 0.f};
float other[6] = {1.f,
skewY ? tanf (skewY) : 0.f,
skewX ? tanf (skewX) : 0.f,
1.f,
0.f, 0.f};
transform (matrix, trans, other);
}
bool get_points (contour_point_vector_t &points) const
{
float translateX = 0.f;
float translateY = 0.f;
float rotation = 0.f;
float scaleX = 1.f * (1 << 10);
float scaleY = 1.f * (1 << 10);
float skewX = 0.f;
float skewY = 0.f;
float tCenterX = 0.f;
float tCenterY = 0.f;
unsigned num_points = get_num_points ();
if (unlikely (!points.resize (points.length + num_points))) return false;
points.alloc (points.length + num_points + 4); // For phantom points
if (unlikely (!points.resize (points.length + num_points, false))) return false;
contour_point_t *rec_points = points.arrayZ + (points.length - num_points);
hb_memset (rec_points, 0, num_points * sizeof (rec_points[0]));
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = numAxes * axis_width;
unsigned fl = flags;
unsigned num_axes = numAxes;
unsigned axis_width = (fl & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = num_axes * axis_width;
const F2DOT14 *q = (const F2DOT14 *) (axes_size +
(flags & GID_IS_24BIT ? 3 : 2) +
&StructAfter<const HBUINT8> (numAxes));
(fl & GID_IS_24BIT ? 3 : 2) +
(const HBUINT8 *) &pad);
hb_array_t<contour_point_t> rec_points = points.as_array ().sub_array (points.length - num_points);
unsigned count = numAxes;
if (flags & AXES_HAVE_VARIATION)
unsigned count = num_axes;
if (fl & AXES_HAVE_VARIATION)
{
for (unsigned i = 0; i < count; i++)
rec_points[i].x = q++->to_int ();
rec_points += count;
rec_points++->x = q++->to_int ();
}
else
q += count;
const HBUINT16 *p = (const HBUINT16 *) q;
if (flags & HAVE_TRANSLATE_X) translateX = * (const FWORD *) p++;
if (flags & HAVE_TRANSLATE_Y) translateY = * (const FWORD *) p++;
if (flags & HAVE_ROTATION) rotation = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SCALE_X) scaleX = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SCALE_Y) scaleY = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SKEW_X) skewX = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SKEW_Y) skewY = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_TCENTER_X) tCenterX = * (const FWORD *) p++;
if (flags & HAVE_TCENTER_Y) tCenterY = * (const FWORD *) p++;
if ((flags & UNIFORM_SCALE) && !(flags & HAVE_SCALE_Y))
scaleY = scaleX;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
rec_points[0].x = translateX;
rec_points[0].y = translateY;
int translateX = (fl & HAVE_TRANSLATE_X) ? * (const FWORD *) p++ : 0;
int translateY = (fl & HAVE_TRANSLATE_Y) ? * (const FWORD *) p++ : 0;
rec_points->x = translateX;
rec_points->y = translateY;
rec_points++;
}
if (flags & HAVE_ROTATION)
if (fl & HAVE_ROTATION)
{
rec_points[0].x = rotation;
int rotation = (fl & HAVE_ROTATION) ? ((const F4DOT12 *) p++)->to_int () : 0;
rec_points->x = rotation;
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
rec_points[0].x = scaleX;
rec_points[0].y = scaleY;
int scaleX = (fl & HAVE_SCALE_X) ? ((const F6DOT10 *) p++)->to_int () : 1 << 10;
int scaleY = (fl & HAVE_SCALE_Y) ? ((const F6DOT10 *) p++)->to_int () : 1 << 10;
if ((fl & UNIFORM_SCALE) && !(fl & HAVE_SCALE_Y))
scaleY = scaleX;
rec_points->x = scaleX;
rec_points->y = scaleY;
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
rec_points[0].x = skewX;
rec_points[0].y = skewY;
int skewX = (fl & HAVE_SKEW_X) ? ((const F4DOT12 *) p++)->to_int () : 0;
int skewY = (fl & HAVE_SKEW_Y) ? ((const F4DOT12 *) p++)->to_int () : 0;
rec_points->x = skewX;
rec_points->y = skewY;
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
rec_points[0].x = tCenterX;
rec_points[0].y = tCenterY;
int tCenterX = (fl & HAVE_TCENTER_X) ? * (const FWORD *) p++ : 0;
int tCenterY = (fl & HAVE_TCENTER_Y) ? * (const FWORD *) p++ : 0;
rec_points->x = tCenterX;
rec_points->y = tCenterY;
rec_points++;
}
assert (!rec_points);
return true;
}
void get_transformation_from_points (hb_array_t<contour_point_t> rec_points,
void get_transformation_from_points (const contour_point_t *rec_points,
float (&matrix)[4], contour_point_t &trans) const
{
if (flags & AXES_HAVE_VARIATION)
unsigned fl = flags;
if (fl & AXES_HAVE_VARIATION)
rec_points += numAxes;
matrix[0] = matrix[3] = 1.f;
@ -274,36 +303,35 @@ struct VarCompositeGlyphRecord
float tCenterX = 0.f;
float tCenterY = 0.f;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
translateX = rec_points[0].x;
translateY = rec_points[0].y;
translateX = rec_points->x;
translateY = rec_points->y;
rec_points++;
}
if (flags & HAVE_ROTATION)
if (fl & HAVE_ROTATION)
{
rotation = rec_points[0].x / (1 << 12);
rotation = rec_points->x / (1 << 12);
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
scaleX = rec_points[0].x / (1 << 10);
scaleY = rec_points[0].y / (1 << 10);
scaleX = rec_points->x / (1 << 10);
scaleY = rec_points->y / (1 << 10);
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
skewX = rec_points[0].x / (1 << 12);
skewY = rec_points[0].y / (1 << 12);
skewX = rec_points->x / (1 << 12);
skewY = rec_points->y / (1 << 12);
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
tCenterX = rec_points[0].x;
tCenterY = rec_points[0].y;
tCenterX = rec_points->x;
tCenterY = rec_points->y;
rec_points++;
}
assert (!rec_points);
translate (matrix, trans, translateX + tCenterX, translateY + tCenterY);
rotate (matrix, trans, rotation);
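
The divisors in this function undo the record's fixed-point encodings: rotation and skews are F4DOT12 (divide by 1 << 12), scales are F6DOT10 (divide by 1 << 10), and the axis values read elsewhere are F2DOT14. A minimal sketch of the conversions, assuming plain int16 storage:

#include <cstdint>
#include <cstdio>

// value / (1 << fraction_bits)
static float f4dot12 (int16_t v) { return v / (float) (1 << 12); }  // rotation, skew
static float f6dot10 (int16_t v) { return v / (float) (1 << 10); }  // scale
static float f2dot14 (int16_t v) { return v / (float) (1 << 14); }  // axis coords

int main ()
{
  printf ("%g %g %g\n",
          f4dot12 (1 << 12),  // 1.0
          f6dot10 (1 << 10),  // 1.0 -- matches the scale default of 1 << 10
          f2dot14 (0x4000));  // 1.0
  return 0;
}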
@ -317,18 +345,19 @@ struct VarCompositeGlyphRecord
{
bool have_variations = flags & AXES_HAVE_VARIATION;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned num_axes = numAxes;
const HBUINT8 *p = (const HBUINT8 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));
const HBUINT16 *q = (const HBUINT16 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));
const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + numAxes) : (HBUINT8 *) (q + numAxes)));
const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + num_axes) : (HBUINT8 *) (q + num_axes)));
unsigned count = numAxes;
unsigned count = num_axes;
for (unsigned i = 0; i < count; i++)
{
unsigned axis_index = axis_width == 1 ? (unsigned) *p++ : (unsigned) *q++;
signed v = have_variations ? rec_points[i].x : a++->to_int ();
signed v = have_variations ? rec_points.arrayZ[i].x : a++->to_int ();
v = hb_clamp (v, -(1<<14), (1<<14));
setter[axis_index] = v;
@ -338,8 +367,9 @@ struct VarCompositeGlyphRecord
protected:
HBUINT16 flags;
HBUINT8 numAxes;
HBUINT16 pad;
public:
DEFINE_SIZE_MIN (3);
DEFINE_SIZE_MIN (5);
};
using var_composite_iter_t = composite_iter_tmpl<VarCompositeGlyphRecord>;

View File

@ -16,6 +16,8 @@ struct coord_setter_t
int& operator [] (unsigned idx)
{
if (unlikely (idx >= HB_GLYF_VAR_COMPOSITE_MAX_AXES))
return Crap(int);
if (coords.length < idx + 1)
coords.resize (idx + 1);
return coords[idx];

View File

@ -12,24 +12,44 @@ namespace OT {
namespace glyf_impl {
template<typename IteratorIn, typename IteratorOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int)),
hb_requires (hb_is_sink_of (IteratorOut, unsigned))>
template<typename IteratorIn, typename TypeOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int))>
static void
_write_loca (IteratorIn&& it, bool short_offsets, IteratorOut&& dest)
_write_loca (IteratorIn&& it,
const hb_sorted_vector_t<hb_codepoint_pair_t> new_to_old_gid_list,
bool short_offsets,
TypeOut *dest,
unsigned num_offsets)
{
unsigned right_shift = short_offsets ? 1 : 0;
unsigned int offset = 0;
dest << 0;
+ it
| hb_map ([=, &offset] (unsigned int padded_size)
{
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry offset %u", offset);
return offset >> right_shift;
})
| hb_sink (dest)
;
unsigned offset = 0;
TypeOut value;
value = 0;
*dest++ = value;
hb_codepoint_t last = 0;
for (auto _ : new_to_old_gid_list)
{
hb_codepoint_t gid = _.first;
for (; last < gid; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
unsigned padded_size = *it++;
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry gid %u offset %u padded-size %u", gid, offset, padded_size);
value = offset >> right_shift;
*dest++ = value;
last++; // Skip over gid
}
unsigned num_glyphs = num_offsets - 1;
for (; last < num_glyphs; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
}
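
The rewritten _write_loca walks the sparse new-gid/old-gid list directly instead of iterating a padded-size stream for every output glyph: gids absent from the subset repeat the running offset, which encodes a zero-length glyph, and short loca stores offset >> 1. A sketch of the resulting offset array, assuming a plain gid-to-padded-size map:

#include <cstdio>
#include <map>
#include <vector>

// Sketch: build loca-style offsets; gaps repeat the running offset,
// so absent glyphs get zero length.
static std::vector<unsigned>
write_loca (const std::map<unsigned, unsigned> &sizes, unsigned num_glyphs)
{
  std::vector<unsigned> loca;
  loca.reserve (num_glyphs + 1);
  unsigned offset = 0;
  loca.push_back (0);
  for (unsigned gid = 0; gid < num_glyphs; gid++)
  {
    auto it = sizes.find (gid);
    if (it != sizes.end ()) offset += it->second;
    loca.push_back (offset);  // repeated offset == empty glyph
  }
  return loca;
}

int main ()
{
  auto loca = write_loca ({{0, 12}, {2, 20}}, 4);
  for (unsigned v : loca) printf ("%u ", v);  // 0 12 12 32 32
  return 0;
}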
static bool
@ -67,11 +87,14 @@ _add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca)
template<typename Iterator,
hb_requires (hb_is_source_of (Iterator, unsigned int))>
static bool
_add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_short_loca)
_add_loca_and_head (hb_subset_context_t *c,
Iterator padded_offsets,
bool use_short_loca)
{
unsigned num_offsets = padded_offsets.len () + 1;
unsigned num_offsets = c->plan->num_output_glyphs () + 1;
unsigned entry_size = use_short_loca ? 2 : 4;
char *loca_prime_data = (char *) hb_calloc (entry_size, num_offsets);
char *loca_prime_data = (char *) hb_malloc (entry_size * num_offsets);
if (unlikely (!loca_prime_data)) return false;
@ -79,9 +102,9 @@ _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_s
entry_size, num_offsets, entry_size * num_offsets);
if (use_short_loca)
_write_loca (padded_offsets, true, hb_array ((HBUINT16 *) loca_prime_data, num_offsets));
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, true, (HBUINT16 *) loca_prime_data, num_offsets);
else
_write_loca (padded_offsets, false, hb_array ((HBUINT32 *) loca_prime_data, num_offsets));
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, false, (HBUINT32 *) loca_prime_data, num_offsets);
hb_blob_t *loca_blob = hb_blob_create (loca_prime_data,
entry_size * num_offsets,
@ -89,8 +112,8 @@ _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_s
loca_prime_data,
hb_free);
bool result = plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version (plan, use_short_loca);
bool result = c->plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version (c->plan, use_short_loca);
hb_blob_destroy (loca_blob);
return result;

View File

@ -85,75 +85,72 @@ struct glyf
return_trace (false);
}
glyf *glyf_prime = c->serializer->start_embed <glyf> ();
if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false);
hb_font_t *font = nullptr;
if (c->plan->normalized_coords)
{
font = _create_font_for_instancing (c->plan);
if (unlikely (!font)) return false;
if (unlikely (!font))
return_trace (false);
}
hb_vector_t<unsigned> padded_offsets;
unsigned num_glyphs = c->plan->num_output_glyphs ();
if (unlikely (!padded_offsets.resize (num_glyphs)))
{
hb_font_destroy (font);
return false;
}
if (unlikely (!padded_offsets.alloc (c->plan->new_to_old_gid_list.length, true)))
return_trace (false);
hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
if (!_populate_subset_glyphs (c->plan, font, glyphs))
{
hb_font_destroy (font);
return false;
return_trace (false);
}
if (font)
hb_font_destroy (font);
unsigned max_offset = 0;
for (unsigned i = 0; i < num_glyphs; i++)
for (auto &g : glyphs)
{
padded_offsets[i] = glyphs[i].padded_size ();
max_offset += padded_offsets[i];
unsigned size = g.padded_size ();
padded_offsets.push (size);
max_offset += size;
}
bool use_short_loca = false;
if (likely (!c->plan->force_long_loca))
use_short_loca = max_offset < 0x1FFFF;
if (!use_short_loca) {
for (unsigned i = 0; i < num_glyphs; i++)
padded_offsets[i] = glyphs[i].length ();
if (!use_short_loca)
{
padded_offsets.resize (0);
for (auto &g : glyphs)
padded_offsets.push (g.length ());
}
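
The 0x1FFFF bound follows from the short format itself: short loca stores offset / 2 in a uint16, so the largest encodable byte offset is 0xFFFF * 2 = 0x1FFFE, and the padded (even) sizes guarantee the division is exact; the long-loca path switches back to unpadded lengths because only the short format needs the padding. A quick check of the arithmetic:

#include <cassert>
#include <cstdint>

int main ()
{
  // Short loca stores offset >> 1 in a uint16_t, so the largest
  // representable byte offset is 0xFFFF * 2.
  uint32_t max_offset = 0x1FFFE;
  assert (max_offset < 0x1FFFF);                    // short loca still fits
  assert ((uint16_t) (max_offset >> 1) == 0xFFFF);  // stored value at the limit
  return 0;
}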
bool result = glyf_prime->serialize (c->serializer, glyphs.writer (), use_short_loca, c->plan);
auto *glyf_prime = c->serializer->start_embed <glyf> ();
bool result = glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan);
if (c->plan->normalized_coords && !c->plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs);
if (!result) return false;
if (unlikely (!c->serializer->check_success (glyf_impl::_add_loca_and_head (c,
padded_offsets.iter (),
use_short_loca))))
return_trace (false);
if (unlikely (c->serializer->in_error ())) return_trace (false);
return_trace (c->serializer->check_success (glyf_impl::_add_loca_and_head (c->plan,
padded_offsets.iter (),
use_short_loca)));
return result;
}
bool
_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph> &glyphs /* OUT */) const;
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const;
hb_font_t *
_create_font_for_instancing (const hb_subset_plan_t *plan) const;
void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs) const
{
for (unsigned i = 0; i < glyphs.length; i++)
glyphs[i].free_compiled_bytes ();
for (auto &g : glyphs)
g.free_compiled_bytes ();
}
protected:
@ -222,13 +219,14 @@ struct glyf_accelerator_t
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, nullptr, nullptr, nullptr, true, true, phantom_only)))
return false;
unsigned count = all_points.length;
assert (count >= glyf_impl::PHANTOM_COUNT);
count -= glyf_impl::PHANTOM_COUNT;
if (consumer.is_consuming_contour_points ())
{
unsigned count = all_points.length;
assert (count >= glyf_impl::PHANTOM_COUNT);
count -= glyf_impl::PHANTOM_COUNT;
for (unsigned point_index = 0; point_index < count; point_index++)
consumer.consume_point (all_points[point_index]);
for (auto &point : all_points.as_array ().sub_array (0, count))
consumer.consume_point (point);
consumer.points_end ();
}
@ -236,7 +234,7 @@ struct glyf_accelerator_t
contour_point_t *phantoms = consumer.get_phantoms_sink ();
if (phantoms)
for (unsigned i = 0; i < glyf_impl::PHANTOM_COUNT; ++i)
phantoms[i] = all_points[all_points.length - glyf_impl::PHANTOM_COUNT + i];
phantoms[i] = all_points.arrayZ[count + i];
return true;
}
@ -299,6 +297,7 @@ struct glyf_accelerator_t
if (extents) bounds = contour_bounds_t ();
}
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point) { bounds.add (point); }
void points_end () { bounds.get_extents (font, extents, scaled); }
@ -431,16 +430,17 @@ glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const
{
OT::glyf_accelerator_t glyf (plan->source);
unsigned num_glyphs = plan->num_output_glyphs ();
if (!glyphs.resize (num_glyphs)) return false;
if (!glyphs.alloc (plan->new_to_old_gid_list.length, true)) return false;
for (auto p : plan->glyph_map->iter ())
for (const auto &pair : plan->new_to_old_gid_list)
{
unsigned new_gid = p.second;
glyf_impl::SubsetGlyph& subset_glyph = glyphs.arrayZ[new_gid];
subset_glyph.old_gid = p.first;
hb_codepoint_t new_gid = pair.first;
hb_codepoint_t old_gid = pair.second;
glyf_impl::SubsetGlyph *p = glyphs.push ();
glyf_impl::SubsetGlyph& subset_glyph = *p;
subset_glyph.old_gid = old_gid;
if (unlikely (new_gid == 0 &&
if (unlikely (old_gid == 0 && new_gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) &&
!plan->normalized_coords)
subset_glyph.source_glyph = glyf_impl::Glyph ();
@ -487,7 +487,7 @@ glyf::_create_font_for_instancing (const hb_subset_plan_t *plan) const
{
hb_variation_t var;
var.tag = _.first;
var.value = _.second;
var.value = _.second.middle;
vars.push (var);
}

View File

@ -21,19 +21,15 @@ struct path_builder_t
operator bool () const { return has_data; }
bool has_data = false;
float x = 0.;
float y = 0.;
float x;
float y;
optional_point_t lerp (optional_point_t p, float t)
{ return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); }
optional_point_t mid (optional_point_t p)
{ return optional_point_t ((x + p.x) * 0.5f, (y + p.y) * 0.5f); }
} first_oncurve, first_offcurve, first_offcurve2, last_offcurve, last_offcurve2;
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_)
{
font = font_;
draw_session = &draw_session_;
first_oncurve = first_offcurve = first_offcurve2 = last_offcurve = last_offcurve2 = optional_point_t ();
}
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) :
font (font_), draw_session (&draw_session_) {}
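
mid () replaces the lerp (p, .5f) calls below because the only interpolation the path builder needs is the TrueType implied on-curve point: two consecutive off-curve points imply an on-curve point at their midpoint. A trivial sketch of that rule with plain float pairs:

#include <cstdio>

struct Pt { float x, y; };

// Implied on-curve point between two consecutive off-curve
// (quadratic control) points is their midpoint.
static Pt mid (Pt a, Pt b)
{ return { (a.x + b.x) * 0.5f, (a.y + b.y) * 0.5f }; }

int main ()
{
  Pt off1 = {0.f, 0.f}, off2 = {10.f, 4.f};
  Pt implied = mid (off1, off2);
  printf ("%g %g\n", implied.x, implied.y);  // 5 2
  return 0;
}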
/* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287
See also:
@ -41,6 +37,7 @@ struct path_builder_t
* https://stackoverflow.com/a/20772557
*
* Cubic support added. */
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point)
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
@ -50,7 +47,7 @@ struct path_builder_t
bool is_cubic = !is_on_curve && (point.flag & glyf_impl::SimpleGlyph::FLAG_CUBIC);
#endif
optional_point_t p (font->em_fscalef_x (point.x), font->em_fscalef_y (point.y));
if (!first_oncurve)
if (unlikely (!first_oncurve))
{
if (is_on_curve)
{
@ -66,7 +63,7 @@ struct path_builder_t
}
else if (first_offcurve)
{
optional_point_t mid = first_offcurve.lerp (p, .5f);
optional_point_t mid = first_offcurve.mid (p);
first_oncurve = mid;
last_offcurve = p;
draw_session->move_to (mid.x, mid.y);
@ -102,7 +99,7 @@ struct path_builder_t
}
else
{
optional_point_t mid = last_offcurve.lerp (p, .5f);
optional_point_t mid = last_offcurve.mid (p);
if (is_cubic)
{
@ -127,13 +124,13 @@ struct path_builder_t
}
}
if (point.is_end_point)
if (unlikely (point.is_end_point))
{
if (first_offcurve && last_offcurve)
{
optional_point_t mid = last_offcurve.lerp (first_offcurve2 ?
first_offcurve2 :
first_offcurve, .5f);
optional_point_t mid = last_offcurve.mid (first_offcurve2 ?
first_offcurve2 :
first_offcurve);
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,

View File

@ -359,7 +359,7 @@ struct name
record.nameID = ids.name_id;
record.length = 0; // handled in NameRecord copy()
record.offset = 0;
memcpy (name_records, &record, NameRecord::static_size);
hb_memcpy (name_records, &record, NameRecord::static_size);
name_records++;
}
#endif
@ -384,10 +384,7 @@ struct name
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
name *name_prime = c->serializer->start_embed<name> ();
if (unlikely (!name_prime)) return_trace (false);
auto *name_prime = c->serializer->start_embed<name> ();
#ifdef HB_EXPERIMENTAL_API
const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides =
@ -436,7 +433,7 @@ struct name
if (!name_table_overrides->is_empty ())
{
if (unlikely (!insert_name_records.alloc (name_table_overrides->get_population (), true)))
return_trace (false);
return false;
for (const auto& record_ids : name_table_overrides->keys ())
{
if (name_table_overrides->get (record_ids).length == 0)
@ -448,13 +445,13 @@ struct name
}
#endif
return (name_prime->serialize (c->serializer, it,
std::addressof (this + stringOffset)
return name_prime->serialize (c->serializer, it,
std::addressof (this + stringOffset)
#ifdef HB_EXPERIMENTAL_API
, insert_name_records
, name_table_overrides
, insert_name_records
, name_table_overrides
#endif
));
);
}
bool sanitize_records (hb_sanitize_context_t *c) const

View File

@ -106,7 +106,7 @@ STEP 6: TESTING
Look for manual layout-related jtreg tests (test/jdk/java/awt/font/TextLayout)
and run them on Windows, Linux and Mac.
Use Font2DTest set to TextLayout and check the above languages. Probably
not going to see layout problems a code point at a time but it needs to
not going to see layout problems in code at this point in time, but it needs to
be checked.
Different Unicode combinations can be checked using Font2DTest.

View File

@ -72,7 +72,7 @@ struct ClassDef : public OT::ClassDef
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_prime_id;
class_def_link->position = link_position;
class_def_prime_vertex.parents.push (parent_id);
class_def_prime_vertex.add_parent (parent_id);
return true;
}
@ -94,7 +94,13 @@ struct ClassDef : public OT::ClassDef
}
hb_bytes_t class_def_copy = serializer.copy_bytes ();
c.add_buffer ((char *) class_def_copy.arrayZ); // Give ownership to the context, it will cleanup the buffer.
if (!class_def_copy.arrayZ) return false;
// Give ownership to the context; it will clean up the buffer.
if (!c.add_buffer ((char *) class_def_copy.arrayZ))
{
hb_free ((char *) class_def_copy.arrayZ);
return false;
}
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) class_def_copy.arrayZ;

View File

@ -96,7 +96,7 @@ struct Coverage : public OT::Layout::Common::Coverage
coverage_link->width = SmallTypes::size;
coverage_link->objidx = coverage_prime_id;
coverage_link->position = link_position;
coverage_prime_vertex.parents.push (parent_id);
coverage_prime_vertex.add_parent (parent_id);
return (Coverage*) coverage_prime_vertex.obj.head;
}
@ -118,7 +118,13 @@ struct Coverage : public OT::Layout::Common::Coverage
}
hb_bytes_t coverage_copy = serializer.copy_bytes ();
c.add_buffer ((char *) coverage_copy.arrayZ); // Give ownership to the context, it will cleanup the buffer.
if (!coverage_copy.arrayZ) return false;
// Give ownership to the context; it will clean up the buffer.
if (!c.add_buffer ((char *) coverage_copy.arrayZ))
{
hb_free ((char *) coverage_copy.arrayZ);
return false;
}
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) coverage_copy.arrayZ;

View File

@ -43,12 +43,28 @@ struct graph_t
{
hb_serialize_context_t::object_t obj;
int64_t distance = 0 ;
int64_t space = 0 ;
hb_vector_t<unsigned> parents;
unsigned space = 0 ;
unsigned start = 0;
unsigned end = 0;
unsigned priority = 0;
private:
unsigned incoming_edges_ = 0;
unsigned single_parent = (unsigned) -1;
hb_hashmap_t<unsigned, unsigned> parents;
public:
auto parents_iter () const HB_AUTO_RETURN
(
hb_concat (
hb_iter (&single_parent, single_parent != (unsigned) -1),
parents.keys_ref ()
)
)
bool in_error () const
{
return parents.in_error ();
}
bool link_positions_valid (unsigned num_objects, bool removed_nil)
{
@ -143,7 +159,9 @@ struct graph_t
hb_swap (a.obj, b.obj);
hb_swap (a.distance, b.distance);
hb_swap (a.space, b.space);
hb_swap (a.single_parent, b.single_parent);
hb_swap (a.parents, b.parents);
hb_swap (a.incoming_edges_, b.incoming_edges_);
hb_swap (a.start, b.start);
hb_swap (a.end, b.end);
hb_swap (a.priority, b.priority);
@ -154,6 +172,7 @@ struct graph_t
{
hb_hashmap_t<unsigned, unsigned> result;
result.alloc (obj.real_links.length);
for (const auto& l : obj.real_links) {
result.set (l.position, l.objidx);
}
@ -163,27 +182,83 @@ struct graph_t
bool is_shared () const
{
return parents.length > 1;
return parents.get_population () > 1;
}
unsigned incoming_edges () const
{
return parents.length;
if (HB_DEBUG_SUBSET_REPACK)
{
assert (incoming_edges_ == (single_parent != (unsigned) -1) +
(parents.values_ref () | hb_reduce (hb_add, 0)));
}
return incoming_edges_;
}
void reset_parents ()
{
incoming_edges_ = 0;
single_parent = (unsigned) -1;
parents.reset ();
}
void add_parent (unsigned parent_index)
{
assert (parent_index != (unsigned) -1);
if (incoming_edges_ == 0)
{
single_parent = parent_index;
incoming_edges_ = 1;
return;
}
else if (single_parent != (unsigned) -1)
{
assert (incoming_edges_ == 1);
if (!parents.set (single_parent, 1))
return;
single_parent = (unsigned) -1;
}
unsigned *v;
if (parents.has (parent_index, &v))
{
(*v)++;
incoming_edges_++;
}
else if (parents.set (parent_index, 1))
incoming_edges_++;
}
void remove_parent (unsigned parent_index)
{
for (unsigned i = 0; i < parents.length; i++)
if (parent_index == single_parent)
{
if (parents[i] != parent_index) continue;
parents.remove_unordered (i);
break;
single_parent = (unsigned) -1;
incoming_edges_--;
return;
}
unsigned *v;
if (parents.has (parent_index, &v))
{
incoming_edges_--;
if (*v > 1)
(*v)--;
else
parents.del (parent_index);
if (incoming_edges_ == 1)
{
single_parent = *parents.keys ();
parents.reset ();
}
}
}
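
add_parent and remove_parent implement a two-tier counter: the overwhelmingly common single incoming edge stays inline in single_parent, and only a second distinct parent spills into the parent/edge-count hashmap, with incoming_edges_ caching the total so queries stay O(1). A compact sketch of the same structure, assuming std::unordered_map in place of hb_hashmap_t:

#include <unordered_map>

struct parent_set
{
  static constexpr unsigned NONE = (unsigned) -1;
  unsigned single = NONE;                        // inline single-parent slot
  unsigned total = 0;                            // cached incoming_edges_
  std::unordered_map<unsigned, unsigned> multi;  // parent -> edge count

  void add (unsigned p)
  {
    if (total == 0) { single = p; total = 1; return; }
    if (single != NONE) { multi[single] = 1; single = NONE; }  // spill
    multi[p]++;
    total++;
  }
  void remove (unsigned p)
  {
    if (p == single) { single = NONE; total--; return; }
    auto it = multi.find (p);
    if (it == multi.end ()) return;
    total--;
    if (--it->second == 0) multi.erase (it);
    if (total == 1 && !multi.empty ())           // collapse back to inline slot
    { single = multi.begin ()->first; multi.clear (); }
  }
};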
void remove_real_link (unsigned child_index, const void* offset)
{
for (unsigned i = 0; i < obj.real_links.length; i++)
unsigned count = obj.real_links.length;
for (unsigned i = 0; i < count; i++)
{
auto& link = obj.real_links.arrayZ[i];
if (link.objidx != child_index)
@ -197,18 +272,53 @@ struct graph_t
}
}
void remap_parents (const hb_vector_t<unsigned>& id_map)
bool remap_parents (const hb_vector_t<unsigned>& id_map)
{
for (unsigned i = 0; i < parents.length; i++)
parents[i] = id_map[parents[i]];
if (single_parent != (unsigned) -1)
{
assert (single_parent < id_map.length);
single_parent = id_map[single_parent];
return true;
}
hb_hashmap_t<unsigned, unsigned> new_parents;
new_parents.alloc (parents.get_population ());
for (auto _ : parents)
{
assert (_.first < id_map.length);
assert (!new_parents.has (id_map[_.first]));
new_parents.set (id_map[_.first], _.second);
}
if (parents.in_error() || new_parents.in_error ())
return false;
parents = std::move (new_parents);
return true;
}
void remap_parent (unsigned old_index, unsigned new_index)
{
for (unsigned i = 0; i < parents.length; i++)
if (single_parent != (unsigned) -1)
{
if (parents[i] == old_index)
parents[i] = new_index;
if (single_parent == old_index)
single_parent = new_index;
return;
}
const unsigned *pv;
if (parents.has (old_index, &pv))
{
unsigned v = *pv;
if (!parents.set (new_index, v))
incoming_edges_ -= v;
parents.del (old_index);
if (incoming_edges_ == 1)
{
single_parent = *parents.keys ();
parents.reset ();
}
}
}
@ -328,11 +438,12 @@ struct graph_t
bool removed_nil = false;
vertices_.alloc (objects.length);
vertices_scratch_.alloc (objects.length);
for (unsigned i = 0; i < objects.length; i++)
unsigned count = objects.length;
for (unsigned i = 0; i < count; i++)
{
// If this graph came from a serialization buffer, object 0 is the
// nil object. We don't need it for our purposes here, so drop it.
if (i == 0 && !objects[i])
if (i == 0 && !objects.arrayZ[i])
{
removed_nil = true;
continue;
@ -340,9 +451,9 @@ struct graph_t
vertex_t* v = vertices_.push ();
if (check_success (!vertices_.in_error ()))
v->obj = *objects[i];
v->obj = *objects.arrayZ[i];
check_success (v->link_positions_valid (objects.length, removed_nil));
check_success (v->link_positions_valid (count, removed_nil));
if (!removed_nil) continue;
// Fix indices to account for removed nil object.
@ -354,7 +465,6 @@ struct graph_t
~graph_t ()
{
vertices_.fini ();
for (char* b : buffers)
hb_free (b);
}
@ -364,6 +474,18 @@ struct graph_t
return root ().equals (other.root (), *this, other, 0);
}
void print () const {
for (int i = vertices_.length - 1; i >= 0; i--)
{
const auto& v = vertices_[i];
printf("%d: %u [", i, (unsigned int)v.table_size());
for (const auto &l : v.obj.real_links) {
printf("%u, ", l.objidx);
}
printf("]\n");
}
}
// Sorts links of all objects in a consistent manner and zeroes all offsets.
void normalize ()
{
@ -396,9 +518,10 @@ struct graph_t
return vertices_[i].obj;
}
void add_buffer (char* buffer)
bool add_buffer (char* buffer)
{
buffers.push (buffer);
return !buffers.in_error ();
}
/*
@ -414,7 +537,7 @@ struct graph_t
link->width = 2;
link->objidx = child_id;
link->position = (char*) offset - (char*) v.obj.head;
vertices_[child_id].parents.push (parent_id);
vertices_[child_id].add_parent (parent_id);
}
/*
@ -443,7 +566,7 @@ struct graph_t
update_distances ();
hb_priority_queue_t queue;
hb_priority_queue_t<int64_t> queue;
hb_vector_t<vertex_t> &sorted_graph = vertices_scratch_;
if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return;
hb_vector_t<unsigned> id_map;
@ -460,7 +583,7 @@ struct graph_t
{
unsigned next_id = queue.pop_minimum().second;
hb_swap (sorted_graph[new_id], vertices_[next_id]);
sorted_graph[new_id] = std::move (vertices_[next_id]);
const vertex_t& next = sorted_graph[new_id];
if (unlikely (!check_success(new_id >= 0))) {
@ -488,8 +611,8 @@ struct graph_t
check_success (!queue.in_error ());
check_success (!sorted_graph.in_error ());
remap_all_obj_indices (id_map, &sorted_graph);
hb_swap (vertices_, sorted_graph);
check_success (remap_all_obj_indices (id_map, &sorted_graph));
vertices_ = std::move (sorted_graph);
if (!check_success (new_id == -1))
print_orphaned_nodes ();
@ -579,8 +702,8 @@ struct graph_t
const auto& node = object (node_idx);
if (offset < node.head || offset >= node.tail) return -1;
unsigned length = node.real_links.length;
for (unsigned i = 0; i < length; i++)
unsigned count = node.real_links.length;
for (unsigned i = 0; i < count; i++)
{
// Use direct access for increased performance; this is a hot method.
const auto& link = node.real_links.arrayZ[i];
@ -600,7 +723,7 @@ struct graph_t
{
unsigned child_idx = index_for_offset (node_idx, offset);
auto& child = vertices_[child_idx];
for (unsigned p : child.parents)
for (unsigned p : child.parents_iter ())
{
if (p != node_idx) {
return duplicate (node_idx, child_idx);
@ -683,12 +806,15 @@ struct graph_t
subgraph.set (root_idx, wide_parents (root_idx, parents));
find_subgraph (root_idx, subgraph);
}
if (subgraph.in_error ())
return false;
unsigned original_root_idx = root_idx ();
hb_map_t index_map;
bool made_changes = false;
for (auto entry : subgraph.iter ())
{
assert (entry.first < vertices_.length);
const auto& node = vertices_[entry.first];
unsigned subgraph_incoming_edges = entry.second;
@ -727,8 +853,7 @@ struct graph_t
remap_obj_indices (index_map, parents.iter (), true);
// Update roots set with new indices as needed.
uint32_t next = HB_SET_VALUE_INVALID;
while (roots.next (&next))
for (auto next : roots)
{
const uint32_t *v;
if (index_map.has (next, &v))
@ -745,10 +870,10 @@ struct graph_t
{
for (const auto& link : vertices_[node_idx].obj.all_links ())
{
const uint32_t *v;
hb_codepoint_t *v;
if (subgraph.has (link.objidx, &v))
{
subgraph.set (link.objidx, *v + 1);
(*v)++;
continue;
}
subgraph.set (link.objidx, 1);
@ -820,7 +945,7 @@ struct graph_t
new_link->position = (const char*) new_offset - (const char*) new_v.obj.head;
auto& child = vertices_[child_id];
child.parents.push (new_parent_idx);
child.add_parent (new_parent_idx);
old_v.remove_real_link (child_id, old_offset);
child.remove_parent (old_parent_idx);
@ -864,18 +989,18 @@ struct graph_t
clone->obj.tail = child.obj.tail;
clone->distance = child.distance;
clone->space = child.space;
clone->parents.reset ();
clone->reset_parents ();
unsigned clone_idx = vertices_.length - 2;
for (const auto& l : child.obj.real_links)
{
clone->obj.real_links.push (l);
vertices_[l.objidx].parents.push (clone_idx);
vertices_[l.objidx].add_parent (clone_idx);
}
for (const auto& l : child.obj.virtual_links)
{
clone->obj.virtual_links.push (l);
vertices_[l.objidx].parents.push (clone_idx);
vertices_[l.objidx].add_parent (clone_idx);
}
check_success (!clone->obj.real_links.in_error ());
@ -1004,13 +1129,13 @@ struct graph_t
{
update_parents();
if (root().parents)
if (root().incoming_edges ())
// Root cannot have parents.
return false;
for (unsigned i = 0; i < root_idx (); i++)
{
if (!vertices_[i].parents)
if (!vertices_[i].incoming_edges ())
return false;
}
return true;
@ -1074,14 +1199,14 @@ struct graph_t
parents_invalid = true;
update_parents();
if (root().parents) {
if (root().incoming_edges ()) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Root node has incoming edges.");
}
for (unsigned i = 0; i < root_idx (); i++)
{
const auto& v = vertices_[i];
if (!v.parents)
if (!v.incoming_edges ())
DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i);
}
}
@ -1113,6 +1238,8 @@ struct graph_t
unsigned space_for (unsigned index, unsigned* root = nullptr) const
{
loop:
assert (index < vertices_.length);
const auto& node = vertices_[index];
if (node.space)
{
@ -1121,22 +1248,24 @@ struct graph_t
return node.space;
}
if (!node.parents)
if (!node.incoming_edges ())
{
if (root)
*root = index;
return 0;
}
return space_for (node.parents[0], root);
index = *node.parents_iter ();
goto loop;
}
void err_other_error () { this->successful = false; }
size_t total_size_in_bytes () const {
size_t total_size = 0;
for (unsigned i = 0; i < vertices_.length; i++) {
size_t size = vertices_[i].obj.tail - vertices_[i].obj.head;
unsigned count = vertices_.length;
for (unsigned i = 0; i < count; i++) {
size_t size = vertices_.arrayZ[i].obj.tail - vertices_.arrayZ[i].obj.head;
total_size += size;
}
return total_size;
@ -1151,12 +1280,8 @@ struct graph_t
unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const
{
unsigned count = 0;
hb_set_t visited;
for (unsigned p : vertices_[node_idx].parents)
for (unsigned p : vertices_[node_idx].parents_iter ())
{
if (visited.has (p)) continue;
visited.add (p);
// Only real links can be wide
for (const auto& l : vertices_[p].obj.real_links)
{
@ -1183,21 +1308,21 @@ struct graph_t
{
if (!parents_invalid) return;
for (unsigned i = 0; i < vertices_.length; i++)
vertices_[i].parents.reset ();
unsigned count = vertices_.length;
for (unsigned p = 0; p < vertices_.length; p++)
for (unsigned i = 0; i < count; i++)
vertices_.arrayZ[i].reset_parents ();
for (unsigned p = 0; p < count; p++)
{
for (auto& l : vertices_[p].obj.all_links ())
{
vertices_[l.objidx].parents.push (p);
}
for (auto& l : vertices_.arrayZ[p].obj.all_links ())
vertices_[l.objidx].add_parent (p);
}
for (unsigned i = 0; i < vertices_.length; i++)
for (unsigned i = 0; i < count; i++)
// parents arrays must be accurate or downstream operations like cycle detection
// and sorting won't work correctly.
check_success (!vertices_[i].parents.in_error ());
check_success (!vertices_.arrayZ[i].in_error ());
parents_invalid = false;
}
@ -1239,15 +1364,12 @@ struct graph_t
// According to https://www3.cs.stonybrook.edu/~rezaul/papers/TR-07-54.pdf
// for practical performance this is faster than using a more advanced queue
// (such as a Fibonacci queue) with a fast decrease-priority operation.
for (unsigned i = 0; i < vertices_.length; i++)
{
if (i == vertices_.length - 1)
vertices_[i].distance = 0;
else
vertices_[i].distance = hb_int_max (int64_t);
}
unsigned count = vertices_.length;
for (unsigned i = 0; i < count; i++)
vertices_.arrayZ[i].distance = hb_int_max (int64_t);
vertices_.tail ().distance = 0;
hb_priority_queue_t queue;
hb_priority_queue_t<int64_t> queue;
queue.insert (0, vertices_.length - 1);
hb_vector_t<bool> visited;
@ -1265,15 +1387,15 @@ struct graph_t
{
if (visited[link.objidx]) continue;
const auto& child = vertices_[link.objidx].obj;
const auto& child = vertices_.arrayZ[link.objidx].obj;
unsigned link_width = link.width ? link.width : 4; // treat virtual offsets as 32 bits wide
int64_t child_weight = (child.tail - child.head) +
((int64_t) 1 << (link_width * 8)) * (vertices_[link.objidx].space + 1);
((int64_t) 1 << (link_width * 8)) * (vertices_.arrayZ[link.objidx].space + 1);
int64_t child_distance = next_distance + child_weight;
if (child_distance < vertices_[link.objidx].distance)
if (child_distance < vertices_.arrayZ[link.objidx].distance)
{
vertices_[link.objidx].distance = child_distance;
vertices_.arrayZ[link.objidx].distance = child_distance;
queue.insert (child_distance, link.objidx);
}
}
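
update_distances is a single-source shortest-path pass; per the linked paper, a binary heap with re-insertion (skipping stale entries on pop) beats a decrease-key queue in practice, and each child's weight adds a 2^(8 * link_width) penalty scaled by the target's space so wide links dominate the ordering. A minimal sketch of the same relax-and-reinsert loop over a generic graph, assuming std::priority_queue:

#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

static std::vector<int64_t>
shortest_distances (unsigned source,
                    const std::vector<std::vector<std::pair<unsigned, int64_t>>> &adj)
{
  std::vector<int64_t> dist (adj.size (), INT64_MAX);
  std::vector<bool> visited (adj.size (), false);
  using entry = std::pair<int64_t, unsigned>;  // (distance, node)
  std::priority_queue<entry, std::vector<entry>, std::greater<entry>> q;
  dist[source] = 0;
  q.push ({0, source});
  while (!q.empty ())
  {
    auto [d, n] = q.top (); q.pop ();
    if (visited[n]) continue;                  // stale entry: already settled
    visited[n] = true;
    for (auto [child, w] : adj[n])
      if (!visited[child] && d + w < dist[child])
      {
        dist[child] = d + w;
        q.push ({dist[child], child});         // re-insert, no decrease-key
      }
  }
  return dist;
}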
@ -1301,7 +1423,7 @@ struct graph_t
unsigned old_idx = link.objidx;
link.objidx = new_idx;
vertices_[old_idx].remove_parent (parent_idx);
vertices_[new_idx].parents.push (parent_idx);
vertices_[new_idx].add_parent (parent_idx);
}
/*
@ -1329,17 +1451,20 @@ struct graph_t
/*
* Updates all objidx's in all links using the provided mapping.
*/
void remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
bool remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
hb_vector_t<vertex_t>* sorted_graph) const
{
for (unsigned i = 0; i < sorted_graph->length; i++)
unsigned count = sorted_graph->length;
for (unsigned i = 0; i < count; i++)
{
(*sorted_graph)[i].remap_parents (id_map);
for (auto& link : (*sorted_graph)[i].obj.all_links_writer ())
if (!(*sorted_graph)[i].remap_parents (id_map))
return false;
for (auto& link : sorted_graph->arrayZ[i].obj.all_links_writer ())
{
link.objidx = id_map[link.objidx];
}
}
return true;
}
/*
@ -1370,7 +1495,7 @@ struct graph_t
for (const auto& l : v.obj.all_links ())
find_connected_nodes (l.objidx, targets, visited, connected);
for (unsigned p : v.parents)
for (unsigned p : v.parents_iter ())
find_connected_nodes (p, targets, visited, connected);
}

View File

@ -52,7 +52,11 @@ unsigned gsubgpos_graph_context_t::create_node (unsigned size)
if (!buffer)
return -1;
add_buffer (buffer);
if (!add_buffer (buffer)) {
// Allocation did not get stored for freeing later.
hb_free (buffer);
return -1;
}
return graph.new_node (buffer, buffer + size);
}

View File

@ -40,16 +40,16 @@ struct gsubgpos_graph_context_t
graph_t& graph;
unsigned lookup_list_index;
hb_hashmap_t<unsigned, graph::Lookup*> lookups;
hb_hashmap_t<unsigned, unsigned> subtable_to_extension;
HB_INTERNAL gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_);
HB_INTERNAL unsigned create_node (unsigned size);
void add_buffer (char* buffer)
bool add_buffer (char* buffer)
{
graph.add_buffer (buffer);
return graph.add_buffer (buffer);
}
private:

View File

@ -166,7 +166,7 @@ struct Lookup : public OT::Lookup
}
if (all_new_subtables) {
add_sub_tables (c, this_index, type, all_new_subtables);
return add_sub_tables (c, this_index, type, all_new_subtables);
}
return true;
@ -184,7 +184,7 @@ struct Lookup : public OT::Lookup
return sub_table->split_subtables (c, parent_idx, objidx);
}
void add_sub_tables (gsubgpos_graph_context_t& c,
bool add_sub_tables (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned type,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
@ -200,7 +200,12 @@ struct Lookup : public OT::Lookup
size_t new_size = v.table_size ()
+ new_subtable_count * OT::Offset16::static_size;
char* buffer = (char*) hb_calloc (1, new_size);
c.add_buffer (buffer);
if (!buffer) return false;
if (!c.add_buffer (buffer))
{
hb_free (buffer);
return false;
}
hb_memcpy (buffer, v.obj.head, v.table_size());
v.obj.head = buffer;
@ -220,7 +225,7 @@ struct Lookup : public OT::Lookup
if (is_ext)
{
unsigned ext_id = create_extension_subtable (c, subtable_id, type);
c.graph.vertices_[subtable_id].parents.push (ext_id);
c.graph.vertices_[subtable_id].add_parent (ext_id);
subtable_id = ext_id;
}
@ -229,7 +234,7 @@ struct Lookup : public OT::Lookup
link->objidx = subtable_id;
link->position = (char*) &new_lookup->subTable[offset_index++] -
(char*) new_lookup;
c.graph.vertices_[subtable_id].parents.push (this_index);
c.graph.vertices_[subtable_id].add_parent (this_index);
}
}
@ -239,6 +244,7 @@ struct Lookup : public OT::Lookup
// The head location of the lookup has changed, invalidating the lookups map entry
// in the context. Update the map.
c.lookups.set (this_index, new_lookup);
return true;
}
void fix_existing_subtable_links (gsubgpos_graph_context_t& c,
@ -293,24 +299,35 @@ struct Lookup : public OT::Lookup
unsigned subtable_index)
{
unsigned type = lookupType;
unsigned ext_index = -1;
unsigned* existing_ext_index = nullptr;
if (c.subtable_to_extension.has(subtable_index, &existing_ext_index)) {
ext_index = *existing_ext_index;
} else {
ext_index = create_extension_subtable(c, subtable_index, type);
c.subtable_to_extension.set(subtable_index, ext_index);
}
unsigned ext_index = create_extension_subtable(c, subtable_index, type);
if (ext_index == (unsigned) -1)
return false;
auto& subtable_vertex = c.graph.vertices_[subtable_index];
auto& lookup_vertex = c.graph.vertices_[lookup_index];
for (auto& l : lookup_vertex.obj.real_links.writer ())
{
if (l.objidx == subtable_index)
if (l.objidx == subtable_index) {
// Change lookup to point at the extension.
l.objidx = ext_index;
if (existing_ext_index)
subtable_vertex.remove_parent(lookup_index);
}
}
// Make extension point at the subtable.
auto& ext_vertex = c.graph.vertices_[ext_index];
auto& subtable_vertex = c.graph.vertices_[subtable_index];
ext_vertex.parents.push (lookup_index);
subtable_vertex.remap_parent (lookup_index, ext_index);
ext_vertex.add_parent (lookup_index);
if (!existing_ext_index)
subtable_vertex.remap_parent (lookup_index, ext_index);
return true;
}
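Caching the wrapper in subtable_to_extension means a subtable referenced by several lookups gets one extension record instead of one per lookup, with the parent bookkeeping adjusted on the cached path. The memoization shape in isolation, with std::unordered_map standing in for hb_hashmap_t and a hypothetical create_wrapper callback:

#include <unordered_map>

static constexpr unsigned INVALID_INDEX = (unsigned) -1;

unsigned get_or_create_wrapper (unsigned subtable,
                                std::unordered_map<unsigned, unsigned> &cache,
                                unsigned (*create_wrapper) (unsigned))
{
  auto it = cache.find (subtable);
  if (it != cache.end ())
    return it->second;                    // reuse the wrapper built earlier

  unsigned wrapper = create_wrapper (subtable);
  if (wrapper != INVALID_INDEX)
    cache.emplace (subtable, wrapper);    // remember it for the next caller
  return wrapper;
}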

View File

@ -217,7 +217,7 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
const unsigned base_coverage_id = c.graph.index_for_offset (this_index, &baseCoverage);
const unsigned base_size =
OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size +
OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::min_size +
MarkArray::min_size +
AnchorMatrix::min_size +
c.graph.vertices_[base_coverage_id].table_size ();
@ -318,8 +318,11 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
{
hb_vector_t<class_info_t> class_to_info;
unsigned class_count= classCount;
class_to_info.resize (class_count);
unsigned class_count = classCount;
if (!class_count) return class_to_info;
if (!class_to_info.resize (class_count))
return hb_vector_t<class_info_t>();
auto mark_array = c.graph.as_table<MarkArray> (this_index, &markArray);
if (!mark_array) return hb_vector_t<class_info_t> ();
@ -327,6 +330,7 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
for (unsigned mark = 0; mark < mark_count; mark++)
{
unsigned klass = (*mark_array.table)[mark].get_class ();
if (klass >= class_count) continue;
class_to_info[klass].marks.add (mark);
}
@ -335,6 +339,7 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
unsigned mark = (link.position - 2) /
OT::Layout::GPOS_impl::MarkRecord::static_size;
unsigned klass = (*mark_array.table)[mark].get_class ();
if (klass >= class_count) continue;
class_to_info[klass].child_indices.push (link.objidx);
}
@ -479,7 +484,7 @@ struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
// Don't split 24bit PairPos's.
// Don't split 24bit MarkBasePos's.
#endif
default:
return hb_vector_t<unsigned> ();

View File

@ -215,7 +215,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
auto gid_and_class =
+ coverage->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1->get_class (gid));
return hb_codepoint_pair_t (gid, class_def_1->get_class (gid));
})
;
class_def_size_estimator_t estimator (gid_and_class);
@ -386,14 +386,14 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
auto klass_map =
+ coverage_table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1_table->get_class (gid));
return hb_codepoint_pair_t (gid, class_def_1_table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass >= start && klass < end;
}, hb_second)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, hb_codepoint_t> gid_and_class) {
| hb_map_retains_sorting ([&] (hb_codepoint_pair_t gid_and_class) {
// Classes must be from 0...N so subtract start
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid_and_class.first, gid_and_class.second - start);
return hb_codepoint_pair_t (gid_and_class.first, gid_and_class.second - start);
})
;
@ -419,7 +419,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_2_id;
class_def_link->position = 10;
graph.vertices_[class_def_2_id].parents.push (pair_pos_prime_id);
graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id);
graph.duplicate (pair_pos_prime_id, class_def_2_id);
return pair_pos_prime_id;
@ -519,7 +519,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
auto klass_map =
+ coverage.table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1.table->get_class (gid));
return hb_codepoint_pair_t (gid, class_def_1.table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass < count;

View File

@ -116,10 +116,10 @@ will_overflow (graph_t& graph,
for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--)
{
// Don't need to check virtual links for overflow
for (const auto& link : vertices[parent_idx].obj.real_links)
for (const auto& link : vertices.arrayZ[parent_idx].obj.real_links)
{
int64_t offset = compute_offset (graph, parent_idx, link);
if (is_valid_offset (offset, link))
if (likely (is_valid_offset (offset, link)))
continue;
if (!overflows) return true;
@ -226,6 +226,9 @@ inline hb_blob_t* serialize (const graph_t& graph)
{
hb_vector_t<char> buffer;
size_t size = graph.total_size_in_bytes ();
if (!size) return hb_blob_get_empty ();
if (!buffer.alloc (size)) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer.");
return nullptr;

View File

@ -851,43 +851,41 @@ struct StateTableDriver
*
* https://github.com/harfbuzz/harfbuzz/issues/2860
*/
const EntryT *wouldbe_entry;
bool safe_to_break =
/* 1. */
!c->is_actionable (this, entry)
&&
/* 2. */
(
/* 2a. */
state == StateTableT::STATE_START_OF_TEXT
||
/* 2b. */
(
(entry.flags & context_t::DontAdvance) &&
next_state == StateTableT::STATE_START_OF_TEXT
)
||
/* 2c. */
(
wouldbe_entry = &machine.get_entry (StateTableT::STATE_START_OF_TEXT, klass)
,
/* 2c'. */
!c->is_actionable (this, *wouldbe_entry)
&&
/* 2c". */
(
next_state == machine.new_state (wouldbe_entry->newState)
&&
(entry.flags & context_t::DontAdvance) == (wouldbe_entry->flags & context_t::DontAdvance)
)
)
)
&&
/* 3. */
!c->is_actionable (this, machine.get_entry (state, StateTableT::CLASS_END_OF_TEXT))
;
if (!safe_to_break && buffer->backtrack_len () && buffer->idx < buffer->len)
const auto is_safe_to_break_extra = [&]()
{
/* 2c. */
const auto wouldbe_entry = machine.get_entry(StateTableT::STATE_START_OF_TEXT, klass);
/* 2c'. */
if (c->is_actionable (this, wouldbe_entry))
return false;
/* 2c". */
return next_state == machine.new_state(wouldbe_entry.newState)
&& (entry.flags & context_t::DontAdvance) == (wouldbe_entry.flags & context_t::DontAdvance);
};
const auto is_safe_to_break = [&]()
{
/* 1. */
if (c->is_actionable (this, entry))
return false;
/* 2. */
// This one is meh, I know...
const auto ok =
state == StateTableT::STATE_START_OF_TEXT
|| ((entry.flags & context_t::DontAdvance) && next_state == StateTableT::STATE_START_OF_TEXT)
|| is_safe_to_break_extra();
if (!ok)
return false;
/* 3. */
return !c->is_actionable (this, machine.get_entry (state, StateTableT::CLASS_END_OF_TEXT));
};
if (!is_safe_to_break () && buffer->backtrack_len () && buffer->idx < buffer->len)
buffer->unsafe_to_break_from_outbuffer (buffer->backtrack_len () - 1, buffer->idx + 1);
c->transition (this, entry);

View File

@ -111,13 +111,13 @@ struct TrackData
break;
}
}
if (!trackTableEntry) return 0.;
if (!trackTableEntry) return 0;
/*
* Choose size.
*/
unsigned int sizes = nSizes;
if (!sizes) return 0.;
if (!sizes) return 0;
if (sizes == 1) return trackTableEntry->get_value (base, 0, sizes);
hb_array_t<const F16DOT16> size_table ((base+sizeTable).arrayZ, sizes);

View File

@ -55,7 +55,13 @@ AAT::hb_aat_apply_context_t::hb_aat_apply_context_t (const hb_ot_shape_plan_t *p
buffer (buffer_),
sanitizer (),
ankr_table (&Null (AAT::ankr)),
gdef_table (face->table.GDEF->table),
gdef_table (
#ifndef HB_NO_OT_LAYOUT
face->table.GDEF->table
#else
&Null (GDEF)
#endif
),
lookup_index (0)
{
sanitizer.init (blob);

View File

@ -87,6 +87,19 @@ static inline constexpr uint16_t hb_uint16_swap (uint16_t v)
static inline constexpr uint32_t hb_uint32_swap (uint32_t v)
{ return (hb_uint16_swap (v) << 16) | hb_uint16_swap (v >> 16); }
#ifndef HB_FAST_INT_ACCESS
#if defined(__OPTIMIZE__) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap16) && \
hb_has_builtin(__builtin_bswap32)))
#define HB_FAST_INT_ACCESS 1
#else
#define HB_FAST_INT_ACCESS 0
#endif
#endif
template <typename Type, int Bytes = sizeof (Type)>
struct BEInt;
template <typename Type>
@ -101,21 +114,25 @@ struct BEInt<Type, 1>
template <typename Type>
struct BEInt<Type, 2>
{
struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
public:
BEInt () = default;
constexpr BEInt (Type V) : v {uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
constexpr operator Type () const
{
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap16)))
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
BEInt (Type V)
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
{ ((packed_uint16_t *) v)->v = __builtin_bswap16 (V); }
#else /* __BYTE_ORDER == __BIG_ENDIAN */
{ ((packed_uint16_t *) v)->v = V; }
#endif
#else
: v {uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
#endif
constexpr operator Type () const {
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap16 (((packed_uint16_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
@ -146,22 +163,27 @@ struct BEInt<Type, 3>
template <typename Type>
struct BEInt<Type, 4>
{
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
public:
BEInt () = default;
constexpr BEInt (Type V) : v {uint8_t ((V >> 24) & 0xFF),
uint8_t ((V >> 16) & 0xFF),
uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
BEInt (Type V)
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
{ ((packed_uint32_t *) v)->v = __builtin_bswap32 (V); }
#else /* __BYTE_ORDER == __BIG_ENDIAN */
{ ((packed_uint32_t *) v)->v = V; }
#endif
#else
: v {uint8_t ((V >> 24) & 0xFF),
uint8_t ((V >> 16) & 0xFF),
uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
#endif
constexpr operator Type () const {
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap32)))
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap32 (((packed_uint32_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
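With HB_FAST_INT_ACCESS enabled, both the constructor and the conversion operator touch the byte array as one unaligned integer plus a byte swap, instead of assembling the value a byte at a time. A reduced sketch of the read path, assuming GCC/Clang extensions and a platform that defines __BYTE_ORDER, as the header itself requires:

#include <cstdint>

// Read a big-endian uint16 from a possibly unaligned pointer.
static inline uint16_t read_be16 (const void *p)
{
  struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
  uint16_t raw = ((const packed_uint16_t *) p)->v;  // unaligned load
#if __BYTE_ORDER == __LITTLE_ENDIAN
  return __builtin_bswap16 (raw);                   // swap into host order
#else
  return raw;                                       // host is already big-endian
#endif
}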
@ -231,12 +253,123 @@ struct
}
HB_FUNCOBJ (hb_bool);
/* The MIT License
Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
#define mix(h) ( \
(void) ((h) ^= (h) >> 23), \
(void) ((h) *= 0x2127599bf4325c37ULL), \
(h) ^= (h) >> 47)
static inline uint64_t fasthash64(const void *buf, size_t len, uint64_t seed)
{
struct __attribute__((packed)) packed_uint64_t { uint64_t v; };
const uint64_t m = 0x880355f21e6d1965ULL;
const packed_uint64_t *pos = (const packed_uint64_t *)buf;
const packed_uint64_t *end = pos + (len / 8);
const unsigned char *pos2;
uint64_t h = seed ^ (len * m);
uint64_t v;
#ifndef HB_OPTIMIZE_SIZE
if (((uintptr_t) pos & 7) == 0)
{
while (pos != end)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
v = * (const uint64_t *) (pos++);
#pragma GCC diagnostic pop
h ^= mix(v);
h *= m;
}
}
else
#endif
{
while (pos != end)
{
v = pos++->v;
h ^= mix(v);
h *= m;
}
}
pos2 = (const unsigned char*)pos;
v = 0;
switch (len & 7) {
case 7: v ^= (uint64_t)pos2[6] << 48; HB_FALLTHROUGH;
case 6: v ^= (uint64_t)pos2[5] << 40; HB_FALLTHROUGH;
case 5: v ^= (uint64_t)pos2[4] << 32; HB_FALLTHROUGH;
case 4: v ^= (uint64_t)pos2[3] << 24; HB_FALLTHROUGH;
case 3: v ^= (uint64_t)pos2[2] << 16; HB_FALLTHROUGH;
case 2: v ^= (uint64_t)pos2[1] << 8; HB_FALLTHROUGH;
case 1: v ^= (uint64_t)pos2[0];
h ^= mix(v);
h *= m;
}
return mix(h);
}
static inline uint32_t fasthash32(const void *buf, size_t len, uint32_t seed)
{
// The following trick converts the 64-bit hash code to a Fermat
// residue, which retains information from both the higher and
// lower parts of the hash code.
uint64_t h = fasthash64(buf, len, seed);
return h - (h >> 32);
}
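fasthash64 consumes eight input bytes per mix round (with a separate fast path when the buffer is 8-byte aligned) and folds any tail bytes through the same mix step; fasthash32 then squeezes the 64-bit result into 32 bits. A usage sketch, assuming the two functions above are in scope; the seed is arbitrary but must stay stable if hashes are ever compared across runs:

#include <cstdint>
#include <cstring>

uint32_t hash_key (const char *key)
{
  return fasthash32 (key, strlen (key), 0xf437ffe6u);  // same seed used elsewhere in this header
}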
struct
{
private:
template <typename T> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, hb_deref (v).hash ())
impl (const T& v, hb_priority<2>) const HB_RETURN (uint32_t, hb_deref (v).hash ())
// Horrible: std::hash() of integers seems to be identity in gcc / clang?!
// https://github.com/harfbuzz/harfbuzz/pull/4228
//
// For performance characteristics see:
// https://github.com/harfbuzz/harfbuzz/pull/4228#issuecomment-1565079537
template <typename T,
hb_enable_if (std::is_integral<T>::value && sizeof (T) <= sizeof (uint32_t))> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) v * 2654435761u /* Knuth's multiplicative hash */)
template <typename T,
hb_enable_if (std::is_integral<T>::value && sizeof (T) > sizeof (uint32_t))> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) (v ^ (v >> 32)) * 2654435761u /* Knuth's multiplicative hash */)
template <typename T,
hb_enable_if (std::is_floating_point<T>::value)> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, fasthash32 (std::addressof (v), sizeof (T), 0xf437ffe6))
template <typename T> constexpr auto
impl (const T& v, hb_priority<0>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v)))
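The integer overloads use Knuth's multiplicative hash: 2654435761 is a prime close to 2^32 divided by the golden ratio, so multiplication spreads consecutive keys across the 32-bit range; 64-bit keys are first folded with v ^ (v >> 32). Restated on its own:

#include <cstdint>

static inline uint32_t hash_u32 (uint32_t v)
{ return v * 2654435761u; }                             // ~2^32 / golden ratio

static inline uint32_t hash_u64 (uint64_t v)
{ return (uint32_t) (v ^ (v >> 32)) * 2654435761u; }    // fold halves, then multiply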
@ -551,6 +684,8 @@ struct hb_pair_t
template <typename T1, typename T2> static inline hb_pair_t<T1, T2>
hb_pair (T1&& a, T2&& b) { return hb_pair_t<T1, T2> (a, b); }
typedef hb_pair_t<hb_codepoint_t, hb_codepoint_t> hb_codepoint_pair_t;
struct
{
template <typename Pair> constexpr typename Pair::first_t
@ -626,8 +761,10 @@ hb_popcount (T v)
if (sizeof (T) == 8)
{
unsigned int shift = 32;
return hb_popcount<uint32_t> ((uint32_t) v) + hb_popcount ((uint32_t) (v >> shift));
uint64_t y = (uint64_t) v;
y -= ((y >> 1) & 0x5555555555555555ull);
y = (y & 0x3333333333333333ull) + (y >> 2 & 0x3333333333333333ull);
return ((y + (y >> 4)) & 0xf0f0f0f0f0f0f0full) * 0x101010101010101ull >> 56;
}
if (sizeof (T) == 16)
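The new 64-bit branch replaces two 32-bit popcounts with the classic SWAR bit count: the first step leaves 2-bit sums, the next leaves 4-bit sums, and the final multiply by 0x0101010101010101 accumulates every byte's count into the top byte. The same arithmetic as a self-contained function:

#include <cstdint>

static inline unsigned popcount64 (uint64_t y)
{
  y -= (y >> 1) & 0x5555555555555555ull;                                // 2-bit sums
  y = (y & 0x3333333333333333ull) + ((y >> 2) & 0x3333333333333333ull); // 4-bit sums
  return (unsigned) (((y + (y >> 4)) & 0x0f0f0f0f0f0f0f0full)
                     * 0x0101010101010101ull >> 56);                    // byte totals
}

// e.g. popcount64 (0xffull) == 8 and popcount64 (0) == 0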
@ -851,7 +988,7 @@ static inline void *
hb_memset (void *s, int c, unsigned int n)
{
/* It's illegal to pass NULL to memset(), even if n is zero. */
if (unlikely (!n)) return 0;
if (unlikely (!n)) return s;
return memset (s, c, n);
}

View File

@ -75,11 +75,25 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
*/
typedef Type& __item_t__;
static constexpr bool is_random_access_iterator = true;
static constexpr bool has_fast_len = true;
Type& __item__ () const
{
if (unlikely (!length)) return CrapOrNull (Type);
return *arrayZ;
}
Type& __item_at__ (unsigned i) const
{
if (unlikely (i >= length)) return CrapOrNull (Type);
return arrayZ[i];
}
void __next__ ()
{
if (unlikely (!length))
return;
length--;
backwards_length++;
arrayZ++;
}
void __forward__ (unsigned n)
{
if (unlikely (n > length))
@ -88,6 +102,14 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
backwards_length += n;
arrayZ += n;
}
void __prev__ ()
{
if (unlikely (!backwards_length))
return;
length++;
backwards_length--;
arrayZ--;
}
void __rewind__ (unsigned n)
{
if (unlikely (n > backwards_length))
@ -122,9 +144,14 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
uint32_t hash () const
{
uint32_t current = 0;
// FNV-1a hash function
// https://github.com/harfbuzz/harfbuzz/pull/4228
uint32_t current = /*cbf29ce4*/0x84222325;
for (auto &v : *this)
current = current * 31 + hb_hash (v);
{
current = current ^ hb_hash (v);
current = current * 16777619;
}
return current;
}
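The replaced loop is FNV-1a with the standard 32-bit prime 16777619; the offset basis 0x84222325 is nonstandard, being the low half of the 64-bit FNV basis 0xcbf29ce484222325 (hence the /*cbf29ce4*/ comment). For comparison, textbook byte-wise FNV-1a looks like this:

#include <cstddef>
#include <cstdint>

static inline uint32_t fnv1a (const unsigned char *data, size_t len)
{
  uint32_t h = 0x811c9dc5u;        // standard 32-bit offset basis
  for (size_t i = 0; i < len; i++)
  {
    h ^= data[i];                  // xor first ...
    h *= 16777619u;                // ... then multiply: the "1a" order
  }
  return h;
}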
@ -322,6 +349,7 @@ struct hb_sorted_array_t :
HB_ITER_USING (iter_base_t);
static constexpr bool is_random_access_iterator = true;
static constexpr bool is_sorted_iterator = true;
static constexpr bool has_fast_len = true;
hb_sorted_array_t () = default;
hb_sorted_array_t (const hb_sorted_array_t&) = default;
@ -449,41 +477,21 @@ inline bool hb_array_t<const unsigned char>::operator == (const hb_array_t<const
/* Specialize hash() for byte arrays. */
#ifndef HB_OPTIMIZE_SIZE_MORE
template <>
inline uint32_t hb_array_t<const char>::hash () const
{
uint32_t current = 0;
unsigned i = 0;
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__))
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
for (; i + 4 <= this->length; i += 4)
current = current * 31 + hb_hash ((uint32_t) ((packed_uint32_t *) &this->arrayZ[i])->v);
#endif
for (; i < this->length; i++)
current = current * 31 + hb_hash (this->arrayZ[i]);
return current;
// https://github.com/harfbuzz/harfbuzz/pull/4228
return fasthash32(arrayZ, length, 0xf437ffe6 /* magic? */);
}
template <>
inline uint32_t hb_array_t<const unsigned char>::hash () const
{
uint32_t current = 0;
unsigned i = 0;
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__))
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
for (; i + 4 <= this->length; i += 4)
current = current * 31 + hb_hash ((uint32_t) ((packed_uint32_t *) &this->arrayZ[i])->v);
#endif
for (; i < this->length; i++)
current = current * 31 + hb_hash (this->arrayZ[i]);
return current;
// https://github.com/harfbuzz/harfbuzz/pull/4228
return fasthash32(arrayZ, length, 0xf437ffe6 /* magic? */);
}
#endif
typedef hb_array_t<const char> hb_bytes_t;

View File

@ -204,6 +204,7 @@ struct hb_atomic_ptr_t
hb_atomic_ptr_t () = default;
constexpr hb_atomic_ptr_t (T* v) : v (v) {}
hb_atomic_ptr_t (const hb_atomic_ptr_t &other) = delete;
void init (T* v_ = nullptr) { set_relaxed (v_); }
void set_relaxed (T* v_) { hb_atomic_ptr_impl_set_relaxed (&v, v_); }

View File

@ -39,10 +39,10 @@ struct hb_bimap_t
back_map.reset ();
}
void resize (unsigned pop)
void alloc (unsigned pop)
{
forw_map.resize (pop);
back_map.resize (pop);
forw_map.alloc (pop);
back_map.alloc (pop);
}
bool in_error () const { return forw_map.in_error () || back_map.in_error (); }
@ -83,7 +83,6 @@ struct hb_bimap_t
unsigned int get_population () const { return forw_map.get_population (); }
protected:
hb_map_t forw_map;
hb_map_t back_map;
@ -94,9 +93,31 @@ struct hb_bimap_t
auto iter () const HB_AUTO_RETURN (+ forw_map.iter())
};
/* Inremental bimap: only lhs is given, rhs is incrementally assigned */
struct hb_inc_bimap_t : hb_bimap_t
/* Incremental bimap: only lhs is given, rhs is incrementally assigned */
struct hb_inc_bimap_t
{
bool in_error () const { return forw_map.in_error () || back_map.in_error (); }
unsigned int get_population () const { return forw_map.get_population (); }
void reset ()
{
forw_map.reset ();
back_map.reset ();
}
void alloc (unsigned pop)
{
forw_map.alloc (pop);
back_map.alloc (pop);
}
void clear ()
{
forw_map.clear ();
back_map.resize (0);
}
/* Add a mapping from lhs to rhs with a unique value if lhs is unknown.
* Return the rhs value as the result.
*/
@ -105,32 +126,42 @@ struct hb_inc_bimap_t : hb_bimap_t
hb_codepoint_t rhs = forw_map[lhs];
if (rhs == HB_MAP_VALUE_INVALID)
{
rhs = next_value++;
set (lhs, rhs);
rhs = back_map.length;
forw_map.set (lhs, rhs);
back_map.push (lhs);
}
return rhs;
}
hb_codepoint_t skip ()
{ return next_value++; }
{
hb_codepoint_t start = back_map.length;
back_map.push (HB_MAP_VALUE_INVALID);
return start;
}
hb_codepoint_t skip (unsigned count)
{ return next_value += count; }
{
hb_codepoint_t start = back_map.length;
back_map.alloc (back_map.length + count);
for (unsigned i = 0; i < count; i++)
back_map.push (HB_MAP_VALUE_INVALID);
return start;
}
hb_codepoint_t get_next_value () const
{ return next_value; }
{ return back_map.length; }
void add_set (const hb_set_t *set)
{
hb_codepoint_t i = HB_SET_VALUE_INVALID;
while (hb_set_next (set, &i)) add (i);
for (auto i : *set) add (i);
}
/* Create an identity map. */
bool identity (unsigned int size)
{
clear ();
for (hb_codepoint_t i = 0; i < size; i++) set (i, i);
for (hb_codepoint_t i = 0; i < size; i++) add (i);
return !in_error ();
}
@ -145,20 +176,30 @@ struct hb_inc_bimap_t : hb_bimap_t
{
hb_codepoint_t count = get_population ();
hb_vector_t <hb_codepoint_t> work;
work.resize (count);
if (unlikely (!work.resize (count, false))) return;
for (hb_codepoint_t rhs = 0; rhs < count; rhs++)
work[rhs] = back_map[rhs];
work.arrayZ[rhs] = back_map[rhs];
work.qsort (cmp_id);
clear ();
for (hb_codepoint_t rhs = 0; rhs < count; rhs++)
set (work[rhs], rhs);
add (work.arrayZ[rhs]);
}
hb_codepoint_t get (hb_codepoint_t lhs) const { return forw_map.get (lhs); }
hb_codepoint_t backward (hb_codepoint_t rhs) const { return back_map[rhs]; }
hb_codepoint_t operator [] (hb_codepoint_t lhs) const { return get (lhs); }
bool has (hb_codepoint_t lhs) const { return forw_map.has (lhs); }
protected:
unsigned int next_value = 0;
hb_map_t forw_map;
hb_vector_t<hb_codepoint_t> back_map;
public:
auto keys () const HB_AUTO_RETURN (+ back_map.iter())
};
#endif /* HB_BIMAP_HH */
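After this rework hb_inc_bimap_t drops the explicit next_value counter and the second hash map: the backward mapping is a plain vector indexed by the assigned id, so the next id is simply back_map.length and skipped ids hold an invalid sentinel. A compact sketch of the same design with standard containers standing in for hb_map_t and hb_vector_t:

#include <cstdint>
#include <unordered_map>
#include <vector>

struct inc_bimap
{
  static constexpr uint32_t INVALID = (uint32_t) -1;
  std::unordered_map<uint32_t, uint32_t> forw;  // lhs -> rhs
  std::vector<uint32_t> back;                   // rhs -> lhs; index is the rhs

  uint32_t add (uint32_t lhs)
  {
    auto it = forw.find (lhs);
    if (it != forw.end ()) return it->second;   // already mapped
    uint32_t rhs = (uint32_t) back.size ();     // next id == vector length
    forw.emplace (lhs, rhs);
    back.push_back (lhs);
    return rhs;
  }

  uint32_t skip ()                              // burn one id, mapped to nothing
  {
    uint32_t rhs = (uint32_t) back.size ();
    back.push_back (INVALID);
    return rhs;
  }
};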

View File

@ -89,14 +89,18 @@ struct hb_vector_size_t
struct hb_bit_page_t
{
void init0 () { v.init0 (); }
void init1 () { v.init1 (); }
void init0 () { v.init0 (); population = 0; }
void init1 () { v.init1 (); population = PAGE_BITS; }
void dirty () { population = UINT_MAX; }
static inline constexpr unsigned len ()
{ return ARRAY_LENGTH_CONST (v); }
operator bool () const { return !is_empty (); }
bool is_empty () const
{
if (has_population ()) return !population;
return
+ hb_iter (v)
| hb_none
@ -104,14 +108,11 @@ struct hb_bit_page_t
}
uint32_t hash () const
{
return
+ hb_iter (v)
| hb_reduce ([] (uint32_t h, const elt_t &_) { return h * 31 + hb_hash (_); }, (uint32_t) 0u)
;
return hb_bytes_t ((const char *) &v, sizeof (v)).hash ();
}
void add (hb_codepoint_t g) { elt (g) |= mask (g); }
void del (hb_codepoint_t g) { elt (g) &= ~mask (g); }
void add (hb_codepoint_t g) { elt (g) |= mask (g); dirty (); }
void del (hb_codepoint_t g) { elt (g) &= ~mask (g); dirty (); }
void set (hb_codepoint_t g, bool value) { if (value) add (g); else del (g); }
bool get (hb_codepoint_t g) const { return elt (g) & mask (g); }
@ -123,20 +124,21 @@ struct hb_bit_page_t
*la |= (mask (b) << 1) - mask(a);
else
{
*la |= ~(mask (a) - 1);
*la |= ~(mask (a) - 1llu);
la++;
hb_memset (la, 0xff, (char *) lb - (char *) la);
*lb |= ((mask (b) << 1) - 1);
*lb |= ((mask (b) << 1) - 1llu);
}
dirty ();
}
void del_range (hb_codepoint_t a, hb_codepoint_t b)
{
elt_t *la = &elt (a);
elt_t *lb = &elt (b);
if (la == lb)
*la &= ~((mask (b) << 1) - mask(a));
*la &= ~((mask (b) << 1llu) - mask(a));
else
{
*la &= mask (a) - 1;
@ -144,8 +146,9 @@ struct hb_bit_page_t
hb_memset (la, 0, (char *) lb - (char *) la);
*lb &= ~((mask (b) << 1) - 1);
*lb &= ~((mask (b) << 1) - 1llu);
}
dirty ();
}
void set_range (hb_codepoint_t a, hb_codepoint_t b, bool v)
{ if (v) add_range (a, b); else del_range (a, b); }
@ -216,6 +219,7 @@ struct hb_bit_page_t
return count;
}
bool operator == (const hb_bit_page_t &other) const { return is_equal (other); }
bool is_equal (const hb_bit_page_t &other) const
{
for (unsigned i = 0; i < len (); i++)
@ -223,20 +227,28 @@ struct hb_bit_page_t
return false;
return true;
}
bool operator <= (const hb_bit_page_t &larger_page) const { return is_subset (larger_page); }
bool is_subset (const hb_bit_page_t &larger_page) const
{
if (has_population () && larger_page.has_population () &&
population > larger_page.population)
return false;
for (unsigned i = 0; i < len (); i++)
if (~larger_page.v[i] & v[i])
return false;
return true;
}
bool has_population () const { return population != UINT_MAX; }
unsigned int get_population () const
{
return
if (has_population ()) return population;
population =
+ hb_iter (v)
| hb_reduce ([] (unsigned pop, const elt_t &_) { return pop + hb_popcount (_); }, 0u)
;
return population;
}
bool next (hb_codepoint_t *codepoint) const
@ -332,9 +344,9 @@ struct hb_bit_page_t
const elt_t& elt (hb_codepoint_t g) const { return v[(g & MASK) / ELT_BITS]; }
static constexpr elt_t mask (hb_codepoint_t g) { return elt_t (1) << (g & ELT_MASK); }
mutable unsigned population;
vector_t v;
};
static_assert (hb_bit_page_t::PAGE_BITS == sizeof (hb_bit_page_t) * 8, "");
#endif /* HB_BIT_PAGE_HH */
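The page now caches its population (number of set bits) and invalidates the cache with dirty() on every mutation, making repeated is_empty()/get_population() calls on an unchanged page O(1). The pattern in isolation, keeping UINT_MAX as the "unknown" sentinel and using a GCC/Clang builtin for the recount:

#include <climits>
#include <cstdint>

struct counted_bits
{
  uint64_t bits = 0;
  mutable unsigned population = 0;      // cached count; UINT_MAX means stale

  void dirty () { population = UINT_MAX; }
  void add (unsigned i) { bits |= 1ull << i; dirty (); }

  unsigned get_population () const
  {
    if (population != UINT_MAX) return population;         // cache hit
    population = (unsigned) __builtin_popcountll (bits);   // recount once
    return population;
  }
};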

View File

@ -136,7 +136,7 @@ struct hb_bit_set_invertible_t
/* Sink interface. */
hb_bit_set_invertible_t& operator << (hb_codepoint_t v)
{ add (v); return *this; }
hb_bit_set_invertible_t& operator << (const hb_pair_t<hb_codepoint_t, hb_codepoint_t>& range)
hb_bit_set_invertible_t& operator << (const hb_codepoint_pair_t& range)
{ add_range (range.first, range.second); return *this; }
bool intersects (hb_codepoint_t first, hb_codepoint_t last) const
@ -162,7 +162,7 @@ struct hb_bit_set_invertible_t
auto it1 = iter ();
auto it2 = other.iter ();
return hb_all (+ hb_zip (it1, it2)
| hb_map ([](hb_pair_t<hb_codepoint_t, hb_codepoint_t> _) { return _.first == _.second; }));
| hb_map ([](hb_codepoint_pair_t _) { return _.first == _.second; }));
}
}
@ -345,6 +345,7 @@ struct hb_bit_set_invertible_t
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{
static constexpr bool is_sorted_iterator = true;
static constexpr bool has_fast_len = true;
iter_t (const hb_bit_set_invertible_t &s_ = Null (hb_bit_set_invertible_t),
bool init = true) : s (&s_), v (INVALID), l(0)
{
@ -363,7 +364,7 @@ struct hb_bit_set_invertible_t
unsigned __len__ () const { return l; }
iter_t end () const { return iter_t (*s, false); }
bool operator != (const iter_t& o) const
{ return s != o.s || v != o.v; }
{ return v != o.v || s != o.s; }
protected:
const hb_bit_set_invertible_t *s;

View File

@ -30,7 +30,6 @@
#include "hb.hh"
#include "hb-bit-page.hh"
#include "hb-machinery.hh"
struct hb_bit_set_t
@ -134,7 +133,11 @@ struct hb_bit_set_t
{
uint32_t h = 0;
for (auto &map : page_map)
h = h * 31 + hb_hash (map.major) + hb_hash (pages[map.index]);
{
auto &page = pages.arrayZ[map.index];
if (unlikely (page.is_empty ())) continue;
h = h * 31 + hb_hash (map.major) + hb_hash (page);
}
return h;
}
@ -179,6 +182,16 @@ struct hb_bit_set_t
return true;
}
/* Duplicated here from hb-machinery.hh to avoid including it. */
template<typename Type>
static inline const Type& StructAtOffsetUnaligned(const void *P, unsigned int offset)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
return * reinterpret_cast<const Type*> ((const char *) P + offset);
#pragma GCC diagnostic pop
}
template <typename T>
void set_array (bool v, const T *array, unsigned int count, unsigned int stride=sizeof(T))
{
@ -342,7 +355,7 @@ struct hb_bit_set_t
/* Sink interface. */
hb_bit_set_t& operator << (hb_codepoint_t v)
{ add (v); return *this; }
hb_bit_set_t& operator << (const hb_pair_t<hb_codepoint_t, hb_codepoint_t>& range)
hb_bit_set_t& operator << (const hb_codepoint_pair_t& range)
{ add_range (range.first, range.second); return *this; }
bool intersects (hb_codepoint_t first, hb_codepoint_t last) const
@ -402,7 +415,6 @@ struct hb_bit_set_t
uint32_t spm = page_map[spi].major;
uint32_t lpm = larger_set.page_map[lpi].major;
auto sp = page_at (spi);
auto lp = larger_set.page_at (lpi);
if (spm < lpm && !sp.is_empty ())
return false;
@ -410,6 +422,7 @@ struct hb_bit_set_t
if (lpm < spm)
continue;
auto lp = larger_set.page_at (lpi);
if (!sp.is_subset (lp))
return false;
@ -549,6 +562,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count] = page_map.arrayZ[a];
page_at (count).v = op (page_at (a).v, other.page_at (b).v);
page_at (count).dirty ();
}
else if (page_map.arrayZ[a - 1].major > other.page_map.arrayZ[b - 1].major)
{
@ -567,7 +581,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count).v = other.page_at (b).v;
page_at (count) = other.page_at (b);
}
}
}
@ -585,7 +599,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count).v = other.page_at (b).v;
page_at (count) = other.page_at (b);
}
assert (!count);
resize (newCount);
@ -623,6 +637,7 @@ struct hb_bit_set_t
*codepoint = INVALID;
return false;
}
last_page_lookup = i;
}
const auto* pages_array = pages.arrayZ;
@ -632,7 +647,6 @@ struct hb_bit_set_t
if (pages_array[current.index].next (codepoint))
{
*codepoint += current.major * page_t::PAGE_BITS;
last_page_lookup = i;
return true;
}
i++;
@ -649,7 +663,6 @@ struct hb_bit_set_t
return true;
}
}
last_page_lookup = 0;
*codepoint = INVALID;
return false;
}
@ -863,6 +876,7 @@ struct hb_bit_set_t
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{
static constexpr bool is_sorted_iterator = true;
static constexpr bool has_fast_len = true;
iter_t (const hb_bit_set_t &s_ = Null (hb_bit_set_t),
bool init = true) : s (&s_), v (INVALID), l(0)
{
@ -899,7 +913,7 @@ struct hb_bit_set_t
/* The extra page_map length is necessary; can't just rely on vector here,
* since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actualy major==0 page... */
* major==0, which we can't distinguish from an actually major==0 page... */
unsigned i = last_page_lookup;
if (likely (i < page_map.length))
{
@ -921,7 +935,7 @@ struct hb_bit_set_t
memmove (page_map.arrayZ + i + 1,
page_map.arrayZ + i,
(page_map.length - 1 - i) * page_map.item_size);
page_map[i] = map;
page_map.arrayZ[i] = map;
}
last_page_lookup = i;
@ -933,7 +947,7 @@ struct hb_bit_set_t
/* The extra page_map length is necessary; can't just rely on vector here,
* since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actualy major==0 page... */
* major==0, which we can't distinguish from an actually major==0 page... */
unsigned i = last_page_lookup;
if (likely (i < page_map.length))
{

View File

@ -32,7 +32,7 @@
#include "hb.hh"
#line 33 "hb-buffer-deserialize-json.hh"
#line 36 "hb-buffer-deserialize-json.hh"
static const unsigned char _deserialize_json_trans_keys[] = {
0u, 0u, 9u, 123u, 9u, 34u, 97u, 117u, 120u, 121u, 34u, 34u, 9u, 58u, 9u, 57u,
48u, 57u, 9u, 125u, 9u, 125u, 9u, 93u, 9u, 125u, 34u, 34u, 9u, 58u, 9u, 57u,
@ -555,12 +555,12 @@ _hb_buffer_deserialize_json (hb_buffer_t *buffer,
hb_glyph_info_t info = {0};
hb_glyph_position_t pos = {0};
#line 552 "hb-buffer-deserialize-json.hh"
#line 559 "hb-buffer-deserialize-json.hh"
{
cs = deserialize_json_start;
}
#line 555 "hb-buffer-deserialize-json.hh"
#line 564 "hb-buffer-deserialize-json.hh"
{
int _slen;
int _trans;
@ -772,7 +772,7 @@ _resume:
*end_ptr = p;
}
break;
#line 733 "hb-buffer-deserialize-json.hh"
#line 776 "hb-buffer-deserialize-json.hh"
}
_again:

View File

@ -32,7 +32,7 @@
#include "hb.hh"
#line 33 "hb-buffer-deserialize-text-glyphs.hh"
#line 36 "hb-buffer-deserialize-text-glyphs.hh"
static const unsigned char _deserialize_text_glyphs_trans_keys[] = {
0u, 0u, 48u, 57u, 45u, 57u, 48u, 57u, 45u, 57u, 48u, 57u, 48u, 57u, 45u, 57u,
48u, 57u, 44u, 44u, 45u, 57u, 48u, 57u, 44u, 57u, 43u, 124u, 9u, 124u, 9u, 124u,
@ -349,12 +349,12 @@ _hb_buffer_deserialize_text_glyphs (hb_buffer_t *buffer,
hb_glyph_info_t info = {0};
hb_glyph_position_t pos = {0};
#line 346 "hb-buffer-deserialize-text-glyphs.hh"
#line 353 "hb-buffer-deserialize-text-glyphs.hh"
{
cs = deserialize_text_glyphs_start;
}
#line 349 "hb-buffer-deserialize-text-glyphs.hh"
#line 358 "hb-buffer-deserialize-text-glyphs.hh"
{
int _slen;
int _trans;
@ -550,7 +550,7 @@ _resume:
*end_ptr = p;
}
break;
#line 516 "hb-buffer-deserialize-text-glyphs.hh"
#line 554 "hb-buffer-deserialize-text-glyphs.hh"
}
_again:
@ -667,7 +667,7 @@ _again:
*end_ptr = p;
}
break;
#line 616 "hb-buffer-deserialize-text-glyphs.hh"
#line 671 "hb-buffer-deserialize-text-glyphs.hh"
}
}

View File

@ -32,7 +32,7 @@
#include "hb.hh"
#line 33 "hb-buffer-deserialize-text-unicode.hh"
#line 36 "hb-buffer-deserialize-text-unicode.hh"
static const unsigned char _deserialize_text_unicode_trans_keys[] = {
0u, 0u, 9u, 117u, 43u, 102u, 48u, 102u, 48u, 57u, 9u, 124u, 9u, 124u, 9u, 124u,
9u, 124u, 0
@ -197,12 +197,12 @@ _hb_buffer_deserialize_text_unicode (hb_buffer_t *buffer,
hb_glyph_info_t info = {0};
const hb_glyph_position_t pos = {0};
#line 194 "hb-buffer-deserialize-text-unicode.hh"
#line 201 "hb-buffer-deserialize-text-unicode.hh"
{
cs = deserialize_text_unicode_start;
}
#line 197 "hb-buffer-deserialize-text-unicode.hh"
#line 206 "hb-buffer-deserialize-text-unicode.hh"
{
int _slen;
int _trans;
@ -269,7 +269,7 @@ _resume:
*end_ptr = p;
}
break;
#line 256 "hb-buffer-deserialize-text-unicode.hh"
#line 273 "hb-buffer-deserialize-text-unicode.hh"
}
_again:
@ -307,7 +307,7 @@ _again:
*end_ptr = p;
}
break;
#line 289 "hb-buffer-deserialize-text-unicode.hh"
#line 311 "hb-buffer-deserialize-text-unicode.hh"
}
}

View File

@ -162,14 +162,8 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer,
hb_buffer_set_flags (fragment, flags);
hb_buffer_append (fragment, text_buffer, text_start, text_end);
if (!hb_shape_full (font, fragment, features, num_features, shapers))
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment.");
hb_buffer_destroy (reconstruction);
hb_buffer_destroy (fragment);
return false;
}
else if (!fragment->successful || fragment->shaping_failed)
if (!hb_shape_full (font, fragment, features, num_features, shapers) ||
!fragment->successful || fragment->shaping_failed)
{
hb_buffer_destroy (reconstruction);
hb_buffer_destroy (fragment);
@ -185,15 +179,18 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer,
}
bool ret = true;
hb_buffer_diff_flags_t diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0);
if (diff & ~HB_BUFFER_DIFF_FLAG_GLYPH_FLAGS_MISMATCH)
if (likely (reconstruction->successful))
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-break test failed.");
ret = false;
hb_buffer_diff_flags_t diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0);
if (diff & ~HB_BUFFER_DIFF_FLAG_GLYPH_FLAGS_MISMATCH)
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-break test failed.");
ret = false;
/* Return the reconstructed result instead so it can be inspected. */
hb_buffer_set_length (buffer, 0);
hb_buffer_append (buffer, reconstruction, 0, -1);
/* Return the reconstructed result instead so it can be inspected. */
hb_buffer_set_length (buffer, 0);
hb_buffer_append (buffer, reconstruction, 0, -1);
}
}
hb_buffer_destroy (reconstruction);
@ -316,28 +313,13 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer,
/*
* Shape the two fragment streams.
*/
if (!hb_shape_full (font, fragments[0], features, num_features, shapers))
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment.");
ret = false;
if (!hb_shape_full (font, fragments[0], features, num_features, shapers) ||
!fragments[0]->successful || fragments[0]->shaping_failed)
goto out;
}
else if (!fragments[0]->successful || fragments[0]->shaping_failed)
{
ret = true;
if (!hb_shape_full (font, fragments[1], features, num_features, shapers) ||
!fragments[1]->successful || fragments[1]->shaping_failed)
goto out;
}
if (!hb_shape_full (font, fragments[1], features, num_features, shapers))
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment.");
ret = false;
goto out;
}
else if (!fragments[1]->successful || fragments[1]->shaping_failed)
{
ret = true;
goto out;
}
if (!forward)
{
@ -377,21 +359,23 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer,
hb_buffer_reverse (reconstruction);
}
/*
* Diff results.
*/
diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0);
if (diff & ~HB_BUFFER_DIFF_FLAG_GLYPH_FLAGS_MISMATCH)
if (likely (reconstruction->successful))
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-concat test failed.");
ret = false;
/*
* Diff results.
*/
diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0);
if (diff & ~HB_BUFFER_DIFF_FLAG_GLYPH_FLAGS_MISMATCH)
{
buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-concat test failed.");
ret = false;
/* Return the reconstructed result instead so it can be inspected. */
hb_buffer_set_length (buffer, 0);
hb_buffer_append (buffer, reconstruction, 0, -1);
/* Return the reconstructed result instead so it can be inspected. */
hb_buffer_set_length (buffer, 0);
hb_buffer_append (buffer, reconstruction, 0, -1);
}
}
out:
hb_buffer_destroy (reconstruction);
hb_buffer_destroy (fragments[0]);

View File

@ -268,7 +268,7 @@ hb_buffer_t::similar (const hb_buffer_t &src)
unicode = hb_unicode_funcs_reference (src.unicode);
flags = src.flags;
cluster_level = src.cluster_level;
replacement = src.invisible;
replacement = src.replacement;
invisible = src.invisible;
not_found = src.not_found;
}
@ -499,12 +499,12 @@ hb_buffer_t::set_masks (hb_mask_t value,
unsigned int cluster_start,
unsigned int cluster_end)
{
hb_mask_t not_mask = ~mask;
value &= mask;
if (!mask)
return;
hb_mask_t not_mask = ~mask;
value &= mask;
unsigned int count = len;
for (unsigned int i = 0; i < count; i++)
if (cluster_start <= info[i].cluster && info[i].cluster < cluster_end)
@ -1327,7 +1327,7 @@ hb_buffer_get_invisible_glyph (const hb_buffer_t *buffer)
* Sets the #hb_codepoint_t that replaces characters not found in
* the font during shaping.
*
* The not-found glyph defaults to zero, sometimes knows as the
* The not-found glyph defaults to zero, sometimes known as the
* ".notdef" glyph. This API allows for differentiating the two.
*
* Since: 3.1.0
@ -2076,7 +2076,7 @@ hb_buffer_t::sort (unsigned int start, unsigned int end, int(*compar)(const hb_g
* hb_buffer_diff:
* @buffer: a buffer.
* @reference: other buffer to compare to.
* @dottedcircle_glyph: glyph id of U+25CC DOTTED CIRCLE, or (hb_codepont_t) -1.
* @dottedcircle_glyph: glyph id of U+25CC DOTTED CIRCLE, or (hb_codepoint_t) -1.
* @position_fuzz: allowed absolute difference in position values.
*
* If dottedcircle_glyph is (hb_codepoint_t) -1 then #HB_BUFFER_DIFF_FLAG_DOTTED_CIRCLE_PRESENT

View File

@ -99,7 +99,7 @@ typedef struct hb_glyph_info_t {
* layout, by avoiding re-shaping of each line
* after line-breaking, by limiting the
* reshaping to a small piece around the
* breaking positin only, even if the breaking
* breaking position only, even if the breaking
* position carries the
* #HB_GLYPH_FLAG_UNSAFE_TO_BREAK or when
* hyphenation or other text transformation

View File

@ -464,13 +464,16 @@ struct hb_buffer_t
start, end,
true);
}
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
void unsafe_to_concat (unsigned int start = 0, unsigned int end = -1)
{
if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0))
return;
_set_glyph_flags (HB_GLYPH_FLAG_UNSAFE_TO_CONCAT,
start, end,
true);
false);
}
void unsafe_to_break_from_outbuffer (unsigned int start = 0, unsigned int end = -1)
{
@ -478,6 +481,9 @@ struct hb_buffer_t
start, end,
true, true);
}
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
void unsafe_to_concat_from_outbuffer (unsigned int start = 0, unsigned int end = -1)
{
if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0))
@ -493,6 +499,13 @@ struct hb_buffer_t
HB_NODISCARD HB_INTERNAL bool enlarge (unsigned int size);
HB_NODISCARD bool resize (unsigned length)
{
assert (!have_output);
if (unlikely (!ensure (length))) return false;
len = length;
return true;
}
HB_NODISCARD bool ensure (unsigned int size)
{ return likely (!size || size < allocated) ? true : enlarge (size); }
@ -553,7 +566,7 @@ struct hb_buffer_t
bool message (hb_font_t *font, const char *fmt, ...) HB_PRINTF_FUNC(3, 4)
{
#ifdef HB_NO_BUFFER_MESSAGE
return true;
return true;
#else
if (likely (!messaging ()))
return true;

View File

@ -62,14 +62,12 @@ struct hb_cache_t
static_assert ((key_bits >= cache_bits), "");
static_assert ((key_bits + value_bits <= cache_bits + 8 * sizeof (item_t)), "");
hb_cache_t () { init (); }
void init () { clear (); }
hb_cache_t () { clear (); }
void clear ()
{
for (unsigned i = 0; i < ARRAY_LENGTH (values); i++)
values[i] = -1;
for (auto &v : values)
v = -1;
}
bool get (unsigned int key, unsigned int *value) const

View File

@ -26,6 +26,8 @@
#ifndef HB_CFF_INTERP_COMMON_HH
#define HB_CFF_INTERP_COMMON_HH
extern HB_INTERNAL const unsigned char *endchar_str;
namespace CFF {
using namespace OT;
@ -336,8 +338,6 @@ struct byte_str_ref_t
hb_ubytes_t str;
};
using byte_str_array_t = hb_vector_t<hb_ubytes_t>;
/* stack */
template <typename ELEM, int LIMIT>
struct cff_stack_t

View File

@ -883,14 +883,12 @@ struct cs_interpreter_t : interpreter_t<ENV>
unsigned max_ops = HB_CFF_MAX_OPS;
for (;;) {
if (unlikely (!--max_ops))
OPSET::process_op (SUPER::env.fetch_op (), SUPER::env, param);
if (unlikely (SUPER::env.in_error () || !--max_ops))
{
SUPER::env.set_error ();
break;
}
OPSET::process_op (SUPER::env.fetch_op (), SUPER::env, param);
if (unlikely (SUPER::env.in_error ()))
return false;
}
if (SUPER::env.is_endchar ())
break;
}

View File

@ -815,7 +815,7 @@ parse_tag (const char **pp, const char *end, hb_tag_t *tag)
}
const char *p = *pp;
while (*pp < end && (ISALNUM(**pp) || **pp == '_'))
while (*pp < end && (**pp != ' ' && **pp != '=' && **pp != '[' && **pp != quote))
(*pp)++;
if (p == *pp || *pp - p > 4)

View File

@ -104,6 +104,16 @@ typedef int hb_bool_t;
*
**/
typedef uint32_t hb_codepoint_t;
/**
* HB_CODEPOINT_INVALID:
*
* Unused #hb_codepoint_t value.
*
* Since: 8.0.0
*/
#define HB_CODEPOINT_INVALID ((hb_codepoint_t) -1)
/**
* hb_position_t:
*

View File

@ -44,14 +44,14 @@
#ifdef HB_TINY
#define HB_LEAN
#define HB_MINI
#define HB_OPTIMIZE_SIZE
#define HB_OPTIMIZE_SIZE_MORE
#define HB_MINIMIZE_MEMORY_USAGE
#define HB_NO_MT
#define HB_NO_UCD_UNASSIGNED
#ifndef NDEBUG
#define NDEBUG
#endif
#ifndef __OPTIMIZE_SIZE__
#define __OPTIMIZE_SIZE__
#endif
#endif
#ifdef HB_LEAN
@ -97,6 +97,12 @@
#define HB_NO_BORING_EXPANSION
#endif
#ifdef __OPTIMIZE_SIZE__
#ifndef HB_OPTIMIZE_SIZE
#define HB_OPTIMIZE_SIZE
#endif
#endif
#if defined(HAVE_CONFIG_OVERRIDE_H) || defined(HB_CONFIG_OVERRIDE_H)
#ifndef HB_CONFIG_OVERRIDE_H
#define HB_CONFIG_OVERRIDE_H "config-override.h"
@ -108,7 +114,8 @@
#ifdef HB_NO_BORING_EXPANSION
#define HB_NO_BEYOND_64K
#define HB_NO_AVAR2
#define HB_NO_CUBIC_GLYF
#define HB_NO_VAR_COMPOSITES
#endif
#ifdef HB_DISABLE_DEPRECATED
@ -175,21 +182,27 @@
#define HB_NO_OT_SHAPER_MYANMAR_ZAWGYI
#endif
#ifdef NDEBUG
#ifndef HB_NDEBUG
#define HB_NDEBUG
#endif
#ifdef HB_OPTIMIZE_SIZE_MORE
#define HB_NO_OT_RULESETS_FAST_PATH
#endif
#ifdef __OPTIMIZE_SIZE__
#ifndef HB_OPTIMIZE_SIZE
#define HB_OPTIMIZE_SIZE
#endif
#ifdef HB_MINIMIZE_MEMORY_USAGE
#define HB_NO_GDEF_CACHE
#define HB_NO_OT_LAYOUT_LOOKUP_CACHE
#define HB_NO_OT_FONT_ADVANCE_CACHE
#define HB_NO_OT_FONT_CMAP_CACHE
#endif
#ifdef HB_OPTIMIZE_SIZE
#define HB_NO_OT_LAYOUT_LOOKUP_CACHE
#define HB_OPTIMIZE_SIZE_VAL 1
#else
#define HB_OPTIMIZE_SIZE_VAL 0
#endif
#ifdef HB_MINIMIZE_MEMORY_USAGE
#define HB_MINIMIZE_MEMORY_USAGE_VAL 1
#else
#define HB_MINIMIZE_MEMORY_USAGE_VAL 0
#endif
#endif /* HB_CONFIG_HH */

View File

@ -265,8 +265,9 @@ static inline void _hb_warn_no_return (bool returned)
}
}
template <>
/*static*/ inline void _hb_warn_no_return<hb_empty_t> (bool returned HB_UNUSED)
{}
/*static*/ inline void _hb_warn_no_return<hb_empty_t> (bool returned HB_UNUSED) {}
template <>
/*static*/ inline void _hb_warn_no_return<void> (bool returned HB_UNUSED) {}
template <int max_level, typename ret_t>
struct hb_auto_trace_t
@ -389,6 +390,10 @@ struct hb_no_trace_t {
#define HB_DEBUG_UNISCRIBE (HB_DEBUG+0)
#endif
#ifndef HB_DEBUG_WASM
#define HB_DEBUG_WASM (HB_DEBUG+0)
#endif
/*
* With tracing.
*/
@ -446,12 +451,26 @@ struct hb_no_trace_t {
#define HB_DEBUG_SUBSET_REPACK (HB_DEBUG+0)
#endif
#ifndef HB_DEBUG_PAINT
#define HB_DEBUG_PAINT (HB_DEBUG+0)
#endif
#if HB_DEBUG_PAINT
#define TRACE_PAINT(this) \
HB_UNUSED hb_auto_trace_t<HB_DEBUG_PAINT, void> trace \
(&c->debug_depth, c->get_name (), this, HB_FUNC, \
" ")
#else
#define TRACE_PAINT(this) HB_UNUSED hb_no_trace_t<void> trace
#endif
#ifndef HB_DEBUG_DISPATCH
#define HB_DEBUG_DISPATCH ( \
HB_DEBUG_APPLY + \
HB_DEBUG_SANITIZE + \
HB_DEBUG_SERIALIZE + \
HB_DEBUG_SUBSET + \
HB_DEBUG_PAINT + \
0)
#endif
#if HB_DEBUG_DISPATCH

View File

@ -255,6 +255,52 @@ HB_EXTERN hb_position_t
hb_font_get_glyph_v_kerning (hb_font_t *font,
hb_codepoint_t top_glyph, hb_codepoint_t bottom_glyph);
/**
* hb_font_get_glyph_shape_func_t:
* @font: #hb_font_t to work upon
* @font_data: @font user data pointer
* @glyph: The glyph ID to query
* @draw_funcs: The draw functions to send the shape data to
* @draw_data: The data accompanying the draw functions
* @user_data: User data pointer passed by the caller
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* Since: 4.0.0
* Deprecated: 7.0.0: Use #hb_font_draw_glyph_func_t instead
**/
typedef void (*hb_font_get_glyph_shape_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
hb_draw_funcs_t *draw_funcs, void *draw_data,
void *user_data);
/**
* hb_font_funcs_set_glyph_shape_func:
* @ffuncs: A font-function structure
* @func: (closure user_data) (destroy destroy) (scope notified): The callback function to assign
* @user_data: Data to pass to @func
* @destroy: (nullable): The function to call when @user_data is not needed anymore
*
* Sets the implementation function for #hb_font_get_glyph_shape_func_t,
* which is the same as #hb_font_draw_glyph_func_t.
*
* Since: 4.0.0
* Deprecated: 7.0.0: Use hb_font_funcs_set_draw_glyph_func() instead
**/
HB_DEPRECATED_FOR (hb_font_funcs_set_draw_glyph_func)
HB_EXTERN void
hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_shape_func_t func,
void *user_data, hb_destroy_func_t destroy);
HB_DEPRECATED_FOR (hb_font_draw_glyph)
HB_EXTERN void
hb_font_get_glyph_shape (hb_font_t *font,
hb_codepoint_t glyph,
hb_draw_funcs_t *dfuncs, void *draw_data);
#endif

View File

@ -93,50 +93,57 @@ struct hb_draw_funcs_t
!user_data ? nullptr : user_data->close_path); }
void move_to (void *draw_data, hb_draw_state_t &st,
float to_x, float to_y)
void
HB_ALWAYS_INLINE
move_to (void *draw_data, hb_draw_state_t &st,
float to_x, float to_y)
{
if (st.path_open) close_path (draw_data, st);
if (unlikely (st.path_open)) close_path (draw_data, st);
st.current_x = to_x;
st.current_y = to_y;
}
void line_to (void *draw_data, hb_draw_state_t &st,
float to_x, float to_y)
void
HB_ALWAYS_INLINE
line_to (void *draw_data, hb_draw_state_t &st,
float to_x, float to_y)
{
if (!st.path_open) start_path (draw_data, st);
if (unlikely (!st.path_open)) start_path (draw_data, st);
emit_line_to (draw_data, st, to_x, to_y);
st.current_x = to_x;
st.current_y = to_y;
}
void
HB_ALWAYS_INLINE
quadratic_to (void *draw_data, hb_draw_state_t &st,
float control_x, float control_y,
float to_x, float to_y)
{
if (!st.path_open) start_path (draw_data, st);
if (unlikely (!st.path_open)) start_path (draw_data, st);
emit_quadratic_to (draw_data, st, control_x, control_y, to_x, to_y);
st.current_x = to_x;
st.current_y = to_y;
}
void
HB_ALWAYS_INLINE
cubic_to (void *draw_data, hb_draw_state_t &st,
float control1_x, float control1_y,
float control2_x, float control2_y,
float to_x, float to_y)
{
if (!st.path_open) start_path (draw_data, st);
if (unlikely (!st.path_open)) start_path (draw_data, st);
emit_cubic_to (draw_data, st, control1_x, control1_y, control2_x, control2_y, to_x, to_y);
st.current_x = to_x;
st.current_y = to_y;
}
void
HB_ALWAYS_INLINE
close_path (void *draw_data, hb_draw_state_t &st)
{
if (st.path_open)
if (likely (st.path_open))
{
if ((st.path_start_x != st.current_x) || (st.path_start_y != st.current_y))
emit_line_to (draw_data, st, st.path_start_x, st.path_start_y);
@ -168,6 +175,7 @@ struct hb_draw_session_t
~hb_draw_session_t () { close_path (); }
HB_ALWAYS_INLINE
void move_to (float to_x, float to_y)
{
if (likely (not_slanted))
@ -177,6 +185,7 @@ struct hb_draw_session_t
funcs->move_to (draw_data, st,
to_x + to_y * slant, to_y);
}
HB_ALWAYS_INLINE
void line_to (float to_x, float to_y)
{
if (likely (not_slanted))
@ -187,6 +196,7 @@ struct hb_draw_session_t
to_x + to_y * slant, to_y);
}
void
HB_ALWAYS_INLINE
quadratic_to (float control_x, float control_y,
float to_x, float to_y)
{
@ -200,6 +210,7 @@ struct hb_draw_session_t
to_x + to_y * slant, to_y);
}
void
HB_ALWAYS_INLINE
cubic_to (float control1_x, float control1_y,
float control2_x, float control2_y,
float to_x, float to_y)
@ -215,6 +226,7 @@ struct hb_draw_session_t
control2_x + control2_y * slant, control2_y,
to_x + to_y * slant, to_y);
}
HB_ALWAYS_INLINE
void close_path ()
{
funcs->close_path (draw_data, st);
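These wrappers maintain the implicit path state: move_to closes any contour still open, the segment calls open one on demand, and close_path emits a final line back to the start point when needed, so backends always receive well-formed contours. A stripped-down restatement of that state machine, with emit_* placeholders standing in for the user callbacks:

// emit_line_to / emit_close stand in for user-supplied draw callbacks.
static void emit_line_to (float, float) {}
static void emit_close () {}

struct path_state
{
  bool  open = false;
  float start_x = 0, start_y = 0;
  float cur_x = 0, cur_y = 0;
};

static void close_path (path_state &st)
{
  if (!st.open) return;
  if (st.start_x != st.cur_x || st.start_y != st.cur_y)
    emit_line_to (st.start_x, st.start_y);  // close back to the start point
  emit_close ();
  st.open = false;
}

static void start_path (path_state &st)
{ st.open = true; st.start_x = st.cur_x; st.start_y = st.cur_y; }

static void move_to (path_state &st, float x, float y)
{
  if (st.open) close_path (st);             // never leave a contour dangling
  st.cur_x = x; st.cur_y = y;
}

static void line_to (path_state &st, float x, float y)
{
  if (!st.open) start_path (st);            // open a contour on demand
  emit_line_to (x, y);
  st.cur_x = x; st.cur_y = y;
}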

View File

@ -1066,7 +1066,7 @@ hb_font_get_nominal_glyph (hb_font_t *font,
* @glyph_stride: The stride between successive glyph IDs
*
* Fetches the nominal glyph IDs for a sequence of Unicode code points. Glyph
* IDs must be returned in a #hb_codepoint_t output parameter. Stopes at the
* IDs must be returned in a #hb_codepoint_t output parameter. Stops at the
* first unsupported glyph ID.
*
* Return value: the number of code points processed
@ -1389,6 +1389,7 @@ hb_font_get_glyph_from_name (hb_font_t *font,
return font->get_glyph_from_name (name, len, glyph);
}
#ifndef HB_DISABLE_DEPRECATED
/**
* hb_font_get_glyph_shape:
* @font: #hb_font_t to work upon
@ -1410,6 +1411,7 @@ hb_font_get_glyph_shape (hb_font_t *font,
{
hb_font_draw_glyph (font, glyph, dfuncs, draw_data);
}
#endif
/**
* hb_font_draw_glyph:
@ -2648,7 +2650,6 @@ hb_font_set_variations (hb_font_t *font,
if (axes[axis_index].axisTag == tag)
design_coords[axis_index] = v;
}
font->face->table.avar->map_coords (normalized, coords_length);
hb_ot_var_normalize_coords (font->face, coords_length, design_coords, normalized);
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);
@ -2720,8 +2721,6 @@ hb_font_set_variation (hb_font_t *font,
if (axes[axis_index].axisTag == tag)
design_coords[axis_index] = value;
font->face->table.avar->map_coords (normalized, coords_length);
hb_ot_var_normalize_coords (font->face, coords_length, design_coords, normalized);
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);
@ -3058,6 +3057,7 @@ hb_font_funcs_set_glyph_func (hb_font_funcs_t *ffuncs,
#endif
#ifndef HB_DISABLE_DEPRECATED
void
hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_shape_func_t func,
@ -3066,3 +3066,4 @@ hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
{
hb_font_funcs_set_draw_glyph_func (ffuncs, func, user_data, destroy);
}
#endif

View File

@ -485,25 +485,6 @@ typedef hb_bool_t (*hb_font_get_glyph_from_name_func_t) (hb_font_t *font, void *
hb_codepoint_t *glyph,
void *user_data);
/**
* hb_font_get_glyph_shape_func_t:
* @font: #hb_font_t to work upon
* @font_data: @font user data pointer
* @glyph: The glyph ID to query
* @draw_funcs: The draw functions to send the shape data to
* @draw_data: The data accompanying the draw functions
* @user_data: User data pointer passed by the caller
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* Since: 4.0.0
* Deprecated: 7.0.0: Use #hb_font_draw_glyph_func_t instead
**/
typedef void (*hb_font_get_glyph_shape_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
hb_draw_funcs_t *draw_funcs, void *draw_data,
void *user_data);
/**
* hb_font_draw_glyph_func_t:
* @font: #hb_font_t to work upon
@ -803,24 +784,6 @@ hb_font_funcs_set_glyph_from_name_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_from_name_func_t func,
void *user_data, hb_destroy_func_t destroy);
/**
* hb_font_funcs_set_glyph_shape_func:
* @ffuncs: A font-function structure
* @func: (closure user_data) (destroy destroy) (scope notified): The callback function to assign
* @user_data: Data to pass to @func
* @destroy: (nullable): The function to call when @user_data is not needed anymore
*
* Sets the implementation function for #hb_font_get_glyph_shape_func_t,
* which is the same as #hb_font_draw_glyph_func_t.
*
* Since: 4.0.0
* Deprecated: 7.0.0: Use hb_font_funcs_set_draw_glyph_func() instead
**/
HB_EXTERN void
hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_shape_func_t func,
void *user_data, hb_destroy_func_t destroy);
/**
* hb_font_funcs_set_draw_glyph_func:
* @ffuncs: A font-function structure
@ -828,8 +791,7 @@ hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
* @user_data: Data to pass to @func
* @destroy: (nullable): The function to call when @user_data is not needed anymore
*
* Sets the implementation function for #hb_font_draw_glyph_func_t,
* which is the same as #hb_font_get_glyph_shape_func_t.
* Sets the implementation function for #hb_font_draw_glyph_func_t.
*
* Since: 7.0.0
**/
@ -934,11 +896,6 @@ hb_font_get_glyph_from_name (hb_font_t *font,
const char *name, int len, /* -1 means nul-terminated */
hb_codepoint_t *glyph);
HB_EXTERN void
hb_font_get_glyph_shape (hb_font_t *font,
hb_codepoint_t glyph,
hb_draw_funcs_t *dfuncs, void *draw_data);
HB_EXTERN void
hb_font_draw_glyph (hb_font_t *font,
hb_codepoint_t glyph,

View File

@ -114,7 +114,7 @@ _hb_ft_font_create (FT_Face ft_face, bool symbol, bool unref)
ft_font->load_flags = FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING;
ft_font->cached_serial = (unsigned) -1;
ft_font->advance_cache.init ();
new (&ft_font->advance_cache) hb_ft_advance_cache_t;
return ft_font;
}

View File

@ -63,6 +63,7 @@ struct hb_iter_t
static constexpr bool is_iterator = true;
static constexpr bool is_random_access_iterator = false;
static constexpr bool is_sorted_iterator = false;
static constexpr bool has_fast_len = false; // Should be checked in combination with is_random_access_iterator.
private:
/* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */
@ -393,7 +394,7 @@ struct hb_map_iter_t :
private:
Iter it;
hb_reference_wrapper<Proj> f;
mutable hb_reference_wrapper<Proj> f;
};
template <typename Proj, hb_function_sortedness_t Sorted>
@ -456,8 +457,8 @@ struct hb_filter_iter_t :
private:
Iter it;
hb_reference_wrapper<Pred> p;
hb_reference_wrapper<Proj> f;
mutable hb_reference_wrapper<Pred> p;
mutable hb_reference_wrapper<Proj> f;
};
template <typename Pred, typename Proj>
struct hb_filter_iter_factory_t
@ -841,7 +842,7 @@ struct
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
auto operator () (Iterable&& it, unsigned count) const HB_AUTO_RETURN
( hb_zip (hb_range (count), it) | hb_map (hb_second) )
( hb_zip (hb_range (count), it) | hb_map_retains_sorting (hb_second) )
/* Specialization arrays. */
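
The new has_fast_len flag above lets containers ask for a length hint only when computing it is cheap; the hb_hashmap_t iterable constructor later in this diff checks it alongside is_random_access_iterator. A simplified model with illustrative iterator types:

#include <cstdio>

struct list_iter  { static constexpr bool has_fast_len = false; unsigned len () const { return 3; } };
struct array_iter { static constexpr bool has_fast_len = true;  unsigned len () const { return 3; } };

template <typename It>
static unsigned alloc_hint (const It &it)
{
  // Only pre-allocate when the length is O(1), as the hashmap ctor now does.
  return It::has_fast_len ? it.len () : 0;
}

int main ()
{
  std::printf ("%u %u\n", alloc_hint (list_iter {}), alloc_hint (array_iter {})); // prints: 0 3
}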

View File

@ -53,7 +53,7 @@ struct hb_kern_machine_t
return;
buffer->unsafe_to_concat ();
OT::hb_ot_apply_context_t c (1, font, buffer);
OT::hb_ot_apply_context_t c (1, font, buffer, hb_blob_get_empty ());
c.set_lookup_mask (kern_mask);
c.set_lookup_props (OT::LookupFlag::IgnoreMarks);
auto &skippy_iter = c.iter_input;
@ -70,7 +70,7 @@ struct hb_kern_machine_t
continue;
}
skippy_iter.reset (idx, 1);
skippy_iter.reset (idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
{

View File

@ -89,6 +89,10 @@
#endif
#ifndef HB_GLYF_VAR_COMPOSITE_MAX_AXES
#define HB_GLYF_VAR_COMPOSITE_MAX_AXES 4096
#endif
#ifndef HB_GLYF_MAX_POINTS
#define HB_GLYF_MAX_POINTS 20000
#endif
@ -102,7 +106,7 @@
#endif
#ifndef HB_COLRV1_MAX_EDGE_COUNT
#define HB_COLRV1_MAX_EDGE_COUNT 1024
#define HB_COLRV1_MAX_EDGE_COUNT 65536
#endif

View File

@ -180,6 +180,9 @@ struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData>
hb_lazy_loader_t<Returned,Subclass,Data,WheresData,Stored>
>::value Funcs;
hb_lazy_loader_t () = default;
hb_lazy_loader_t (const hb_lazy_loader_t &other) = delete;
void init0 () {} /* Init, when memory is already set to 0. No-op for us. */
void init () { instance.set_relaxed (nullptr); }
void fini () { do_destroy (instance.get_acquire ()); init (); }
@ -278,7 +281,11 @@ struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData>
template <typename T, unsigned int WheresFace>
struct hb_face_lazy_loader_t : hb_lazy_loader_t<T,
hb_face_lazy_loader_t<T, WheresFace>,
hb_face_t, WheresFace> {};
hb_face_t, WheresFace>
{
// Hack; have them here for API parity with hb_table_lazy_loader_t
hb_blob_t *get_blob () { return this->get ()->get_blob (); }
};
template <typename T, unsigned int WheresFace, bool core=false>
struct hb_table_lazy_loader_t : hb_lazy_loader_t<T,
@ -288,7 +295,7 @@ struct hb_table_lazy_loader_t : hb_lazy_loader_t<T,
{
static hb_blob_t *create (hb_face_t *face)
{
auto c = hb_sanitize_context_t ();
hb_sanitize_context_t c;
if (core)
c.set_num_glyphs (0); // So we don't recurse ad infinitum, or doesn't need num_glyphs
return c.reference_table<T> (face);
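
One plausible reason for replacing `auto c = hb_sanitize_context_t ()` with a plain declaration: under pre-C++17 rules, copy-initializing from a temporary requires an accessible copy or move constructor even when the copy is elided, which bites once such constructors are deleted (as done for hb_lazy_loader_t above). Sketched with an illustrative type:

struct context_t
{
  context_t () = default;
  context_t (const context_t &) = delete; // non-copyable, like several hb contexts
};

int main ()
{
  // auto c = context_t (); // ill-formed under C++11/14: needs a copy/move ctor
  context_t c;              // direct construction never copies
  (void) c;
  return 0;
}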

View File

@ -365,7 +365,7 @@ hb_map_update (hb_map_t *map,
* @key: (out): Key retrieved
* @value: (out): Value retrieved
*
* Fetches the next key/value paire in @map.
* Fetches the next key/value pair in @map.
*
* Set @idx to -1 to get started.
*

View File

@ -44,7 +44,7 @@ HB_BEGIN_DECLS
*
* Since: 1.7.7
*/
#define HB_MAP_VALUE_INVALID ((hb_codepoint_t) -1)
#define HB_MAP_VALUE_INVALID HB_CODEPOINT_INVALID
/**
* hb_map_t:

View File

@ -45,9 +45,9 @@ struct hb_hashmap_t
hb_hashmap_t () { init (); }
~hb_hashmap_t () { fini (); }
hb_hashmap_t (const hb_hashmap_t& o) : hb_hashmap_t () { resize (o.population); hb_copy (o, *this); }
hb_hashmap_t (const hb_hashmap_t& o) : hb_hashmap_t () { alloc (o.population); hb_copy (o, *this); }
hb_hashmap_t (hb_hashmap_t&& o) : hb_hashmap_t () { hb_swap (*this, o); }
hb_hashmap_t& operator= (const hb_hashmap_t& o) { reset (); resize (o.population); hb_copy (o, *this); return *this; }
hb_hashmap_t& operator= (const hb_hashmap_t& o) { reset (); alloc (o.population); hb_copy (o, *this); return *this; }
hb_hashmap_t& operator= (hb_hashmap_t&& o) { hb_swap (*this, o); return *this; }
hb_hashmap_t (std::initializer_list<hb_pair_t<K, V>> lst) : hb_hashmap_t ()
@ -60,29 +60,32 @@ struct hb_hashmap_t
hb_hashmap_t (const Iterable &o) : hb_hashmap_t ()
{
auto iter = hb_iter (o);
if (iter.is_random_access_iterator)
resize (hb_len (iter));
if (iter.is_random_access_iterator || iter.has_fast_len)
alloc (hb_len (iter));
hb_copy (iter, *this);
}
struct item_t
{
K key;
uint32_t hash : 30;
uint32_t is_real_ : 1;
uint32_t is_used_ : 1;
uint32_t is_tombstone_ : 1;
uint32_t hash : 30;
V value;
item_t () : key (),
is_real_ (false), is_used_ (false),
hash (0),
is_used_ (false), is_tombstone_ (false),
value () {}
// Needed for https://github.com/harfbuzz/harfbuzz/issues/4138
K& get_key () { return key; }
V& get_value () { return value; }
bool is_used () const { return is_used_; }
void set_used (bool is_used) { is_used_ = is_used; }
bool is_tombstone () const { return is_tombstone_; }
void set_tombstone (bool is_tombstone) { is_tombstone_ = is_tombstone; }
bool is_real () const { return is_used_ && !is_tombstone_; }
void set_real (bool is_real) { is_real_ = is_real; }
bool is_real () const { return is_real_; }
template <bool v = minus_one,
hb_enable_if (v == false)>
@ -98,10 +101,15 @@ struct hb_hashmap_t
bool operator == (const K &o) const { return hb_deref (key) == hb_deref (o); }
bool operator == (const item_t &o) const { return *this == o.key; }
hb_pair_t<K, V> get_pair() const { return hb_pair_t<K, V> (key, value); }
hb_pair_t<const K &, const V &> get_pair_ref() const { return hb_pair_t<const K &, const V &> (key, value); }
hb_pair_t<const K &, V &> get_pair_ref() { return hb_pair_t<const K &, V &> (key, value); }
uint32_t total_hash () const
{ return (hash * 31) + hb_hash (value); }
{ return (hash * 31u) + hb_hash (value); }
static constexpr bool is_trivial = hb_is_trivially_constructible(K) &&
hb_is_trivially_destructible(K) &&
hb_is_trivially_constructible(V) &&
hb_is_trivially_destructible(V);
};
hb_object_header_t header;
@ -110,6 +118,7 @@ struct hb_hashmap_t
unsigned int occupancy; /* Including tombstones. */
unsigned int mask;
unsigned int prime;
unsigned int max_chain_length;
item_t *items;
friend void swap (hb_hashmap_t& a, hb_hashmap_t& b)
@ -123,6 +132,7 @@ struct hb_hashmap_t
hb_swap (a.occupancy, b.occupancy);
hb_swap (a.mask, b.mask);
hb_swap (a.prime, b.prime);
hb_swap (a.max_chain_length, b.max_chain_length);
hb_swap (a.items, b.items);
}
void init ()
@ -133,16 +143,19 @@ struct hb_hashmap_t
population = occupancy = 0;
mask = 0;
prime = 0;
max_chain_length = 0;
items = nullptr;
}
void fini ()
{
hb_object_fini (this);
if (likely (items)) {
if (likely (items))
{
unsigned size = mask + 1;
for (unsigned i = 0; i < size; i++)
items[i].~item_t ();
if (!item_t::is_trivial)
for (unsigned i = 0; i < size; i++)
items[i].~item_t ();
hb_free (items);
items = nullptr;
}
@ -157,7 +170,7 @@ struct hb_hashmap_t
bool in_error () const { return !successful; }
bool resize (unsigned new_population = 0)
bool alloc (unsigned new_population = 0)
{
if (unlikely (!successful)) return false;
@ -171,8 +184,11 @@ struct hb_hashmap_t
successful = false;
return false;
}
for (auto &_ : hb_iter (new_items, new_size))
new (&_) item_t ();
if (!item_t::is_trivial)
for (auto &_ : hb_iter (new_items, new_size))
new (&_) item_t ();
else
hb_memset (new_items, 0, (size_t) new_size * sizeof (item_t));
unsigned int old_size = size ();
item_t *old_items = items;
@ -181,6 +197,7 @@ struct hb_hashmap_t
population = occupancy = 0;
mask = new_size - 1;
prime = prime_for (power);
max_chain_length = power * 2;
items = new_items;
/* Insert back old items. */
@ -192,7 +209,8 @@ struct hb_hashmap_t
old_items[i].hash,
std::move (old_items[i].value));
}
old_items[i].~item_t ();
if (!item_t::is_trivial)
old_items[i].~item_t ();
}
hb_free (old_items);
@ -201,72 +219,129 @@ struct hb_hashmap_t
}
template <typename KK, typename VV>
bool set_with_hash (KK&& key, uint32_t hash, VV&& value, bool is_delete=false)
bool set_with_hash (KK&& key, uint32_t hash, VV&& value, bool overwrite = true)
{
if (unlikely (!successful)) return false;
if (unlikely ((occupancy + occupancy / 2) >= mask && !resize ())) return false;
item_t &item = item_for_hash (key, hash);
if (unlikely ((occupancy + occupancy / 2) >= mask && !alloc ())) return false;
if (is_delete && !(item == key))
return true; /* Trying to delete non-existent key. */
hash &= 0x3FFFFFFF; // We only store lower 30bit of hash
unsigned int tombstone = (unsigned int) -1;
unsigned int i = hash % prime;
unsigned length = 0;
unsigned step = 0;
while (items[i].is_used ())
{
if ((std::is_integral<K>::value || items[i].hash == hash) &&
items[i] == key)
{
if (!overwrite)
return false;
else
break;
}
if (!items[i].is_real () && tombstone == (unsigned) -1)
tombstone = i;
i = (i + ++step) & mask;
length++;
}
item_t &item = items[tombstone == (unsigned) -1 ? i : tombstone];
if (item.is_used ())
{
occupancy--;
if (!item.is_tombstone ())
population--;
population -= item.is_real ();
}
item.key = std::forward<KK> (key);
item.value = std::forward<VV> (value);
item.hash = hash;
item.set_used (true);
item.set_tombstone (is_delete);
item.set_real (true);
occupancy++;
if (!is_delete)
population++;
population++;
if (unlikely (length > max_chain_length) && occupancy * 8 > mask)
alloc (mask - 8); // This ensures we jump to next larger size
return true;
}
template <typename VV>
bool set (const K &key, VV&& value) { return set_with_hash (key, hb_hash (key), std::forward<VV> (value)); }
bool set (const K &key, VV&& value, bool overwrite = true) { return set_with_hash (key, hb_hash (key), std::forward<VV> (value), overwrite); }
template <typename VV>
bool set (K &&key, VV&& value) { return set_with_hash (std::move (key), hb_hash (key), std::forward<VV> (value)); }
bool set (K &&key, VV&& value, bool overwrite = true)
{
uint32_t hash = hb_hash (key);
return set_with_hash (std::move (key), hash, std::forward<VV> (value), overwrite);
}
bool add (const K &key)
{
uint32_t hash = hb_hash (key);
return set_with_hash (key, hash, item_t::default_value ());
}
const V& get_with_hash (const K &key, uint32_t hash) const
{
if (unlikely (!items)) return item_t::default_value ();
auto &item = item_for_hash (key, hash);
return item.is_real () && item == key ? item.value : item_t::default_value ();
if (!items) return item_t::default_value ();
auto *item = fetch_item (key, hb_hash (key));
if (item)
return item->value;
return item_t::default_value ();
}
const V& get (const K &key) const
{
if (unlikely (!items)) return item_t::default_value ();
if (!items) return item_t::default_value ();
return get_with_hash (key, hb_hash (key));
}
void del (const K &key) { set_with_hash (key, hb_hash (key), item_t::default_value (), true); }
void del (const K &key)
{
if (!items) return;
auto *item = fetch_item (key, hb_hash (key));
if (item)
{
item->set_real (false);
population--;
}
}
/* Has interface. */
const V& operator [] (K k) const { return get (k); }
template <typename VV=V>
bool has (K key, VV **vp = nullptr) const
bool has (const K &key, VV **vp = nullptr) const
{
if (unlikely (!items))
return false;
auto &item = item_for_hash (key, hb_hash (key));
if (item.is_real () && item == key)
if (!items) return false;
auto *item = fetch_item (key, hb_hash (key));
if (item)
{
if (vp) *vp = std::addressof (item.value);
if (vp) *vp = std::addressof (item->value);
return true;
}
else
return false;
return false;
}
item_t *fetch_item (const K &key, uint32_t hash) const
{
hash &= 0x3FFFFFFF; // We only store lower 30bit of hash
unsigned int i = hash % prime;
unsigned step = 0;
while (items[i].is_used ())
{
if ((std::is_integral<K>::value || items[i].hash == hash) &&
items[i] == key)
{
if (items[i].is_real ())
return &items[i];
else
return nullptr;
}
i = (i + ++step) & mask;
}
return nullptr;
}
/* Projection. */
V operator () (K k) const { return get (k); }
const V& operator () (K k) const { return get (k); }
unsigned size () const { return mask ? mask + 1 : 0; }
@ -323,39 +398,37 @@ struct hb_hashmap_t
auto iter_items () const HB_AUTO_RETURN
(
+ hb_iter (items, size ())
+ hb_iter (items, this->size ())
| hb_filter (&item_t::is_real)
)
auto iter_ref () const HB_AUTO_RETURN
(
+ iter_items ()
+ this->iter_items ()
| hb_map (&item_t::get_pair_ref)
)
auto iter () const HB_AUTO_RETURN
(
+ iter_items ()
+ this->iter_items ()
| hb_map (&item_t::get_pair)
)
auto keys_ref () const HB_AUTO_RETURN
(
+ iter_items ()
| hb_map (&item_t::key)
+ this->iter_items ()
| hb_map (&item_t::get_key)
)
auto keys () const HB_AUTO_RETURN
(
+ iter_items ()
| hb_map (&item_t::key)
+ this->keys_ref ()
| hb_map (hb_ridentity)
)
auto values_ref () const HB_AUTO_RETURN
(
+ iter_items ()
| hb_map (&item_t::value)
+ this->iter_items ()
| hb_map (&item_t::get_value)
)
auto values () const HB_AUTO_RETURN
(
+ iter_items ()
| hb_map (&item_t::value)
+ this->values_ref ()
| hb_map (hb_ridentity)
)
@ -393,23 +466,6 @@ struct hb_hashmap_t
hb_hashmap_t& operator << (const hb_pair_t<K&&, V&&>& v)
{ set (std::move (v.first), std::move (v.second)); return *this; }
item_t& item_for_hash (const K &key, uint32_t hash) const
{
hash &= 0x3FFFFFFF; // We only store lower 30bit of hash
unsigned int i = hash % prime;
unsigned int step = 0;
unsigned int tombstone = (unsigned) -1;
while (items[i].is_used ())
{
if (items[i].hash == hash && items[i] == key)
return items[i];
if (tombstone == (unsigned) -1 && items[i].is_tombstone ())
tombstone = i;
i = (i + ++step) & mask;
}
return items[tombstone == (unsigned) -1 ? i : tombstone];
}
static unsigned int prime_for (unsigned int shift)
{
/* Following comment and table copied from glib. */
@ -480,7 +536,7 @@ struct hb_map_t : hb_hashmap_t<hb_codepoint_t,
hb_map_t (hb_map_t &&o) : hashmap (std::move ((hashmap &) o)) {}
hb_map_t& operator= (const hb_map_t&) = default;
hb_map_t& operator= (hb_map_t&&) = default;
hb_map_t (std::initializer_list<hb_pair_t<hb_codepoint_t, hb_codepoint_t>> lst) : hashmap (lst) {}
hb_map_t (std::initializer_list<hb_codepoint_pair_t> lst) : hashmap (lst) {}
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
hb_map_t (const Iterable &o) : hashmap (o) {}
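
The reworked map above probes with a triangular step (i = (i + ++step) & mask) and now deletes by clearing the real flag while leaving used set, so later probes still walk past the dead slot. A minimal standalone model (a fixed-capacity set, not the HarfBuzz API):

#include <cassert>
#include <cstdint>
#include <vector>

struct tiny_set
{
  struct slot { uint32_t key = 0; bool used = false, real = false; };
  std::vector<slot> slots;
  uint32_t mask;

  tiny_set (unsigned log2_size)
    : slots (1u << log2_size), mask ((1u << log2_size) - 1) {}

  slot *probe (uint32_t key) // sketch: assumes the table never fills up
  {
    unsigned i = key & mask, step = 0;
    while (slots[i].used)
    {
      if (slots[i].key == key) return &slots[i];
      i = (i + ++step) & mask; // triangular probing visits every slot when size is 2^n
    }
    return &slots[i];
  }

  void add (uint32_t key) { slot *s = probe (key); s->key = key; s->used = s->real = true; }
  bool has (uint32_t key) { slot *s = probe (key); return s->used && s->real; }
  void del (uint32_t key) { slot *s = probe (key); if (s->used) s->real = false; } // acts as a tombstone
};

int main ()
{
  tiny_set s (4);
  s.add (7); s.add (23); // 7 and 23 collide modulo 16
  s.del (7);
  assert (!s.has (7) && s.has (23)); // the probe walks past the dead slot
  return 0;
}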

View File

@ -153,8 +153,8 @@ struct hb_reference_wrapper
hb_reference_wrapper (T v) : v (v) {}
bool operator == (const hb_reference_wrapper& o) const { return v == o.v; }
bool operator != (const hb_reference_wrapper& o) const { return v != o.v; }
operator T () const { return v; }
T get () const { return v; }
operator T& () { return v; }
T& get () { return v; }
T v;
};
template <typename T>
@ -163,8 +163,8 @@ struct hb_reference_wrapper<T&>
hb_reference_wrapper (T& v) : v (std::addressof (v)) {}
bool operator == (const hb_reference_wrapper& o) const { return v == o.v; }
bool operator != (const hb_reference_wrapper& o) const { return v != o.v; }
operator T& () const { return *v; }
T& get () const { return *v; }
operator T& () { return *v; }
T& get () { return *v; }
T* v;
};

View File

@ -38,10 +38,10 @@ struct hb_multimap_t
{
void add (hb_codepoint_t k, hb_codepoint_t v)
{
hb_codepoint_t *i;
if (multiples_indices.has (k, &i))
hb_vector_t<hb_codepoint_t> *m;
if (multiples.has (k, &m))
{
multiples_values[*i].push (v);
m->push (v);
return;
}
@ -51,12 +51,7 @@ struct hb_multimap_t
hb_codepoint_t old = *old_v;
singulars.del (k);
multiples_indices.set (k, multiples_values.length);
auto *vec = multiples_values.push ();
vec->push (old);
vec->push (v);
multiples.set (k, hb_vector_t<hb_codepoint_t> {old, v});
return;
}
@ -69,22 +64,31 @@ struct hb_multimap_t
if (singulars.has (k, &v))
return hb_array (v, 1);
hb_codepoint_t *i;
if (multiples_indices.has (k, &i))
return multiples_values[*i].as_array ();
hb_vector_t<hb_codepoint_t> *m;
if (multiples.has (k, &m))
return m->as_array ();
return hb_array_t<const hb_codepoint_t> ();
}
bool in_error () const
{
return singulars.in_error () || multiples_indices.in_error () || multiples_values.in_error ();
if (singulars.in_error () || multiples.in_error ())
return true;
for (const auto &m : multiples.values_ref ())
if (m.in_error ())
return true;
return false;
}
void alloc (unsigned size)
{
singulars.alloc (size);
}
protected:
hb_map_t singulars;
hb_map_t multiples_indices;
hb_vector_t<hb_vector_t<hb_codepoint_t>> multiples_values;
hb_hashmap_t<hb_codepoint_t, hb_vector_t<hb_codepoint_t>> multiples;
};
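
The rewrite above replaces the parallel multiples_indices/multiples_values pair with a single hashmap of vectors: a key stays in singulars until its second value arrives, then its values move into a per-key vector. The same two-tier idea, with std:: containers standing in for the hb_ types:

#include <cstdio>
#include <unordered_map>
#include <vector>

struct multimap
{
  std::unordered_map<unsigned, unsigned> singulars;
  std::unordered_map<unsigned, std::vector<unsigned>> multiples;

  void add (unsigned k, unsigned v)
  {
    auto m = multiples.find (k);
    if (m != multiples.end ()) { m->second.push_back (v); return; }
    auto s = singulars.find (k);
    if (s != singulars.end ())
    {
      multiples[k] = {s->second, v}; // promote: old value plus the new one
      singulars.erase (k);
      return;
    }
    singulars[k] = v; // first value for this key
  }
};

int main ()
{
  multimap mm;
  mm.add (1, 10); mm.add (1, 11); mm.add (2, 20);
  std::printf ("%zu singular, %zu multiple\n",
               mm.singulars.size (), mm.multiples.size ()); // prints: 1 singular, 1 multiple
}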

View File

@ -37,7 +37,7 @@
/* Global nul-content Null pool. Enlarge as necessary. */
#define HB_NULL_POOL_SIZE 448
#define HB_NULL_POOL_SIZE 640
template <typename T, typename>
struct _hb_has_min_size : hb_false_type {};
@ -85,7 +85,7 @@ using hb_null_size = _hb_null_size<T, void>;
template <typename T, typename>
struct _hb_static_size : hb_integral_constant<unsigned, sizeof (T)> {};
template <typename T>
struct _hb_static_size<T, hb_void_t<decltype (T::min_size)>> : hb_integral_constant<unsigned, T::static_size> {};
struct _hb_static_size<T, hb_void_t<decltype (T::static_size)>> : hb_integral_constant<unsigned, T::static_size> {};
template <typename T>
using hb_static_size = _hb_static_size<T, void>;
#define hb_static_size(T) hb_static_size<T>::value
@ -176,7 +176,7 @@ template <typename Type>
static inline Type& Crap () {
static_assert (hb_null_size (Type) <= HB_NULL_POOL_SIZE, "Increase HB_NULL_POOL_SIZE.");
Type *obj = reinterpret_cast<Type *> (_hb_CrapPool);
memcpy (obj, &Null (Type), sizeof (*obj));
memcpy (obj, std::addressof (Null (Type)), sizeof (*obj));
return *obj;
}
template <typename QType>
@ -211,11 +211,11 @@ struct hb_nonnull_ptr_t
T * operator = (T *v_) { return v = v_; }
T * operator -> () const { return get (); }
T & operator * () const { return *get (); }
T ** operator & () const { return &v; }
T ** operator & () const { return std::addressof (v); }
/* Only auto-cast to const types. */
template <typename C> operator const C * () const { return get (); }
operator const char * () const { return (const char *) get (); }
T * get () const { return v ? v : const_cast<T *> (&Null (T)); }
T * get () const { return v ? v : const_cast<T *> (std::addressof (Null (T))); }
T * get_raw () const { return v; }
private:

View File

@ -31,7 +31,7 @@
#include "hb.hh"
#line 32 "hb-number-parser.hh"
#line 35 "hb-number-parser.hh"
static const unsigned char _double_parser_trans_keys[] = {
0u, 0u, 43u, 57u, 46u, 57u, 48u, 57u, 43u, 57u, 48u, 57u, 48u, 101u, 48u, 57u,
46u, 101u, 0
@ -135,12 +135,12 @@ strtod_rl (const char *p, const char **end_ptr /* IN/OUT */)
int cs;
#line 132 "hb-number-parser.hh"
#line 139 "hb-number-parser.hh"
{
cs = double_parser_start;
}
#line 135 "hb-number-parser.hh"
#line 144 "hb-number-parser.hh"
{
int _slen;
int _trans;
@ -198,7 +198,7 @@ _resume:
exp_overflow = true;
}
break;
#line 187 "hb-number-parser.hh"
#line 202 "hb-number-parser.hh"
}
_again:

View File

@ -131,7 +131,7 @@ typedef struct OpenTypeOffsetTable
sfnt_version = sfnt_tag;
/* Take space for numTables, searchRange, entrySelector, RangeShift
* and the TableRecords themselves. */
unsigned num_items = it.len ();
unsigned num_items = hb_len (it);
if (unlikely (!tables.serialize (c, num_items))) return_trace (false);
const char *dir_end = (const char *) c->head;
@ -145,7 +145,7 @@ typedef struct OpenTypeOffsetTable
unsigned len = blob->length;
/* Allocate room for the table and copy it. */
char *start = (char *) c->allocate_size<void> (len);
char *start = (char *) c->allocate_size<void> (len, false);
if (unlikely (!start)) return false;
TableRecord &rec = tables.arrayZ[i];

View File

@ -312,6 +312,8 @@ struct _hb_has_null<Type, true>
template <typename Type, typename OffsetType, bool has_null=true>
struct OffsetTo : Offset<OffsetType, has_null>
{
using target_t = Type;
// Make sure Type is not unbounded; works only for types that are fully defined at OffsetTo time.
static_assert (has_null == false ||
(hb_has_null_size (Type) || !hb_has_min_size (Type)), "");
@ -416,12 +418,15 @@ struct OffsetTo : Offset<OffsetType, has_null>
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this))) return_trace (false);
if (unlikely (this->is_null ())) return_trace (true);
//if (unlikely (this->is_null ())) return_trace (true);
if (unlikely ((const char *) base + (unsigned) *this < (const char *) base)) return_trace (false);
return_trace (true);
}
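
The surviving check above rejects offsets whose addition to the base pointer wraps around. In standalone form (like HarfBuzz itself, this leans on pragmatic pointer comparison rather than strictly portable arithmetic):

#include <cstdint>
#include <cstdio>

static bool offset_ok (const char *base, uint32_t offset, const char *end)
{
  const char *target = base + offset;
  if (target < base) return false; // the addition wrapped
  return target <= end;            // stays inside the sanitized blob
}

int main ()
{
  char blob[64];
  std::printf ("%d %d\n",
               offset_ok (blob, 16, blob + sizeof blob),    // 1
               offset_ok (blob, 128, blob + sizeof blob));  // 0 (past the blob)
}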
template <typename ...Ts>
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
bool sanitize (hb_sanitize_context_t *c, const void *base, Ts&&... ds) const
{
TRACE_SANITIZE (this);
@ -462,24 +467,16 @@ struct UnsizedArrayOf
HB_DELETE_CREATE_COPY_ASSIGN (UnsizedArrayOf);
const Type& operator [] (int i_) const
const Type& operator [] (unsigned int i) const
{
unsigned int i = (unsigned int) i_;
const Type *p = &arrayZ[i];
if (unlikely ((const void *) p < (const void *) arrayZ)) return Null (Type); /* Overflowed. */
_hb_compiler_memory_r_barrier ();
return *p;
return arrayZ[i];
}
Type& operator [] (int i_)
Type& operator [] (unsigned int i)
{
unsigned int i = (unsigned int) i_;
Type *p = &arrayZ[i];
if (unlikely ((const void *) p < (const void *) arrayZ)) return Crap (Type); /* Overflowed. */
_hb_compiler_memory_r_barrier ();
return *p;
return arrayZ[i];
}
unsigned int get_size (unsigned int len) const
static unsigned int get_size (unsigned int len)
{ return len * Type::static_size; }
template <typename T> operator T * () { return arrayZ; }
@ -533,6 +530,7 @@ struct UnsizedArrayOf
}
template <typename ...Ts>
HB_ALWAYS_INLINE
bool sanitize (hb_sanitize_context_t *c, unsigned int count, Ts&&... ds) const
{
TRACE_SANITIZE (this);
@ -721,6 +719,7 @@ struct ArrayOf
}
template <typename ...Ts>
HB_ALWAYS_INLINE
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{
TRACE_SANITIZE (this);
@ -736,7 +735,7 @@ struct ArrayOf
bool sanitize_shallow (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (len.sanitize (c) && c->check_array (arrayZ, len));
return_trace (len.sanitize (c) && c->check_array_sized (arrayZ, len, sizeof (LenType)));
}
public:
@ -797,7 +796,7 @@ template <typename Type>
using List16OfOffset16To = List16OfOffsetTo<Type, HBUINT16>;
/* An array starting at second element. */
template <typename Type, typename LenType=HBUINT16>
template <typename Type, typename LenType>
struct HeadlessArrayOf
{
static constexpr unsigned item_size = Type::static_size;
@ -861,6 +860,7 @@ struct HeadlessArrayOf
}
template <typename ...Ts>
HB_ALWAYS_INLINE
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{
TRACE_SANITIZE (this);
@ -878,7 +878,7 @@ struct HeadlessArrayOf
{
TRACE_SANITIZE (this);
return_trace (lenP1.sanitize (c) &&
(!lenP1 || c->check_array (arrayZ, lenP1 - 1)));
(!lenP1 || c->check_array_sized (arrayZ, lenP1 - 1, sizeof (LenType))));
}
public:
@ -887,6 +887,7 @@ struct HeadlessArrayOf
public:
DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ);
};
template <typename Type> using HeadlessArray16Of = HeadlessArrayOf<Type, HBUINT16>;
/* An array storing length-1. */
template <typename Type, typename LenType=HBUINT16>
@ -912,6 +913,7 @@ struct ArrayOfM1
{ return lenM1.static_size + (lenM1 + 1) * Type::static_size; }
template <typename ...Ts>
HB_ALWAYS_INLINE
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{
TRACE_SANITIZE (this);
@ -929,7 +931,7 @@ struct ArrayOfM1
{
TRACE_SANITIZE (this);
return_trace (lenM1.sanitize (c) &&
(c->check_array (arrayZ, lenM1 + 1)));
(c->check_array_sized (arrayZ, lenM1 + 1, sizeof (LenType))));
}
public:
@ -1096,6 +1098,7 @@ struct VarSizedBinSearchArrayOf
{ return header.static_size + header.nUnits * header.unitSize; }
template <typename ...Ts>
HB_ALWAYS_INLINE
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{
TRACE_SANITIZE (this);

View File

@ -48,12 +48,24 @@ static inline const Type& StructAtOffsetOrNull (const void *P, unsigned int offs
struct code_pair_t
{
hb_codepoint_t code;
unsigned code;
hb_codepoint_t glyph;
};
using str_buff_t = hb_vector_t<unsigned char>;
using str_buff_vec_t = hb_vector_t<str_buff_t>;
using glyph_to_sid_map_t = hb_vector_t<code_pair_t>;
struct length_f_t
{
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
unsigned operator () (const Iterable &_) const { return hb_len (hb_iter (_)); }
unsigned operator () (unsigned _) const { return _; }
}
HB_FUNCOBJ (length_f);
/* CFF INDEX */
template <typename COUNT>
@ -62,42 +74,52 @@ struct CFFIndex
unsigned int offset_array_size () const
{ return offSize * (count + 1); }
CFFIndex *copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
unsigned int size = get_size ();
CFFIndex *out = c->allocate_size<CFFIndex> (size, false);
if (likely (out))
hb_memcpy (out, this, size);
return_trace (out);
}
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
bool serialize (hb_serialize_context_t *c,
const Iterable &iterable)
const Iterable &iterable,
const unsigned *p_data_size = nullptr)
{
TRACE_SERIALIZE (this);
unsigned data_size;
if (p_data_size)
data_size = *p_data_size;
else
total_size (iterable, &data_size);
auto it = hb_iter (iterable);
serialize_header(c, + it | hb_map (hb_iter) | hb_map (hb_len));
if (unlikely (!serialize_header (c, +it, data_size))) return_trace (false);
unsigned char *ret = c->allocate_size<unsigned char> (data_size, false);
if (unlikely (!ret)) return_trace (false);
for (const auto &_ : +it)
hb_iter (_).copy (c);
{
unsigned len = _.length;
if (!len)
continue;
if (len <= 1)
{
*ret++ = *_.arrayZ;
continue;
}
hb_memcpy (ret, _.arrayZ, len);
ret += len;
}
return_trace (true);
}
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize_header (hb_serialize_context_t *c,
Iterator it)
Iterator it,
unsigned data_size)
{
TRACE_SERIALIZE (this);
unsigned total = + it | hb_reduce (hb_add, 0);
unsigned off_size = (hb_bit_storage (total + 1) + 7) / 8;
unsigned off_size = (hb_bit_storage (data_size + 1) + 7) / 8;
/* serialize CFFIndex header */
if (unlikely (!c->extend_min (this))) return_trace (false);
this->count = it.len ();
this->count = hb_len (it);
if (!this->count) return_trace (true);
if (unlikely (!c->extend (this->offSize))) return_trace (false);
this->offSize = off_size;
@ -106,25 +128,88 @@ struct CFFIndex
/* serialize indices */
unsigned int offset = 1;
unsigned int i = 0;
for (unsigned _ : +it)
if (HB_OPTIMIZE_SIZE_VAL)
{
set_offset_at (i++, offset);
offset += _;
unsigned int i = 0;
for (const auto &_ : +it)
{
set_offset_at (i++, offset);
offset += length_f (_);
}
set_offset_at (i, offset);
}
set_offset_at (i, offset);
else
switch (off_size)
{
case 1:
{
HBUINT8 *p = (HBUINT8 *) offsets;
for (const auto &_ : +it)
{
*p++ = offset;
offset += length_f (_);
}
*p = offset;
}
break;
case 2:
{
HBUINT16 *p = (HBUINT16 *) offsets;
for (const auto &_ : +it)
{
*p++ = offset;
offset += length_f (_);
}
*p = offset;
}
break;
case 3:
{
HBUINT24 *p = (HBUINT24 *) offsets;
for (const auto &_ : +it)
{
*p++ = offset;
offset += length_f (_);
}
*p = offset;
}
break;
case 4:
{
HBUINT32 *p = (HBUINT32 *) offsets;
for (const auto &_ : +it)
{
*p++ = offset;
offset += length_f (_);
}
*p = offset;
}
break;
default:
break;
}
assert (offset == data_size + 1);
return_trace (true);
}
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
static unsigned total_size (const Iterable &iterable)
static unsigned total_size (const Iterable &iterable, unsigned *data_size = nullptr)
{
auto it = + hb_iter (iterable) | hb_map (hb_iter) | hb_map (hb_len);
if (!it) return 0;
auto it = + hb_iter (iterable);
if (!it)
{
if (data_size) *data_size = 0;
return min_size;
}
unsigned total = 0;
for (const auto &_ : +it)
total += length_f (_);
if (data_size) *data_size = total;
unsigned total = + it | hb_reduce (hb_add, 0);
unsigned off_size = (hb_bit_storage (total + 1) + 7) / 8;
return min_size + HBUINT8::static_size + (hb_len (it) + 1) * off_size + total;
@ -133,13 +218,16 @@ struct CFFIndex
void set_offset_at (unsigned int index, unsigned int offset)
{
assert (index <= count);
HBUINT8 *p = offsets + offSize * index + offSize;
unsigned int size = offSize;
for (; size; size--)
const HBUINT8 *p = offsets;
switch (size)
{
--p;
*p = offset & 0xFF;
offset >>= 8;
case 1: ((HBUINT8 *) p)[index] = offset; break;
case 2: ((HBUINT16 *) p)[index] = offset; break;
case 3: ((HBUINT24 *) p)[index] = offset; break;
case 4: ((HBUINT32 *) p)[index] = offset; break;
default: return;
}
}
@ -149,37 +237,30 @@ struct CFFIndex
assert (index <= count);
unsigned int size = offSize;
const HBUINT8 *p = offsets + size * index;
const HBUINT8 *p = offsets;
switch (size)
{
case 1: return * (HBUINT8 *) p;
case 2: return * (HBUINT16 *) p;
case 3: return * (HBUINT24 *) p;
case 4: return * (HBUINT32 *) p;
case 1: return ((HBUINT8 *) p)[index];
case 2: return ((HBUINT16 *) p)[index];
case 3: return ((HBUINT24 *) p)[index];
case 4: return ((HBUINT32 *) p)[index];
default: return 0;
}
}
unsigned int length_at (unsigned int index) const
{
unsigned offset0 = offset_at (index);
unsigned offset1 = offset_at (index + 1);
if (unlikely (offset1 < offset0 || offset1 > offset_at (count)))
return 0;
return offset1 - offset0;
}
const unsigned char *data_base () const
{ return (const unsigned char *) this + min_size + offSize.static_size + offset_array_size (); }
{ return (const unsigned char *) this + min_size + offSize.static_size - 1 + offset_array_size (); }
public:
hb_ubytes_t operator [] (unsigned int index) const
{
if (unlikely (index >= count)) return hb_ubytes_t ();
_hb_compiler_memory_r_barrier ();
unsigned length = length_at (index);
if (unlikely (!length)) return hb_ubytes_t ();
return hb_ubytes_t (data_base () + offset_at (index) - 1, length);
unsigned offset0 = offset_at (index);
unsigned offset1 = offset_at (index + 1);
if (unlikely (offset1 < offset0 || offset1 > offset_at (count)))
return hb_ubytes_t ();
return hb_ubytes_t (data_base () + offset0, offset1 - offset0);
}
unsigned int get_size () const
@ -197,7 +278,7 @@ struct CFFIndex
(count < count + 1u &&
c->check_struct (&offSize) && offSize >= 1 && offSize <= 4 &&
c->check_array (offsets, offSize, count + 1u) &&
c->check_array ((const HBUINT8*) data_base (), 1, offset_at (count) - 1)))));
c->check_array ((const HBUINT8*) data_base (), 1, offset_at (count))))));
}
public:
@ -211,47 +292,6 @@ struct CFFIndex
DEFINE_SIZE_MIN (COUNT::static_size);
};
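
For context on the offSize switch above: a CFF INDEX with count entries stores count + 1 offsets, each big-endian in offSize (1 to 4) bytes, and offsets are 1-based relative to the start of the data region. A decoding sketch:

#include <cstdint>
#include <cstdio>

static uint32_t read_offset (const uint8_t *offsets, unsigned off_size, unsigned index)
{
  const uint8_t *p = offsets + off_size * index;
  uint32_t v = 0;
  for (unsigned i = 0; i < off_size; i++)
    v = (v << 8) | p[i]; // big-endian, 1 to 4 bytes per offset
  return v;
}

int main ()
{
  // Two entries of lengths 3 and 5 with offSize = 1: offsets are 1, 4, 9.
  const uint8_t offsets[] = {1, 4, 9};
  uint32_t o0 = read_offset (offsets, 1, 1);
  uint32_t o1 = read_offset (offsets, 1, 2);
  std::printf ("entry 1: offset %u, length %u\n", o0, o1 - o0); // offset 4, length 5
}

The specialized set_offset_at/offset_at above exploit that each width maps onto a plain array of HBUINT8/16/24/32, so an entry is indexed directly instead of assembled byte by byte.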
template <typename COUNT, typename TYPE>
struct CFFIndexOf : CFFIndex<COUNT>
{
template <typename DATA, typename PARAM1, typename PARAM2>
bool serialize (hb_serialize_context_t *c,
unsigned int offSize_,
const DATA *dataArray,
unsigned int dataArrayLen,
const hb_vector_t<unsigned int> &dataSizeArray,
const PARAM1 &param1,
const PARAM2 &param2)
{
TRACE_SERIALIZE (this);
/* serialize CFFIndex header */
if (unlikely (!c->extend_min (this))) return_trace (false);
this->count = dataArrayLen;
this->offSize = offSize_;
if (unlikely (!c->allocate_size<HBUINT8> (offSize_ * (dataArrayLen + 1), false)))
return_trace (false);
/* serialize indices */
unsigned int offset = 1;
unsigned int i = 0;
for (; i < dataArrayLen; i++)
{
this->set_offset_at (i, offset);
offset += dataSizeArray[i];
}
this->set_offset_at (i, offset);
/* serialize data */
for (unsigned int i = 0; i < dataArrayLen; i++)
{
TYPE *dest = c->start_embed<TYPE> ();
if (unlikely (!dest || !dest->serialize (c, dataArray[i], param1, param2)))
return_trace (false);
}
return_trace (true);
}
};
/* Top Dict, Font Dict, Private Dict */
struct Dict : UnsizedByteStr
{
@ -327,7 +367,7 @@ struct table_info_t
};
template <typename COUNT>
struct FDArray : CFFIndexOf<COUNT, FontDict>
struct FDArray : CFFIndex<COUNT>
{
template <typename DICTVAL, typename INFO, typename Iterator, typename OP_SERIALIZER>
bool serialize (hb_serialize_context_t *c,
@ -338,7 +378,11 @@ struct FDArray : CFFIndexOf<COUNT, FontDict>
/* serialize INDEX data */
hb_vector_t<unsigned> sizes;
if (it.is_random_access_iterator)
sizes.alloc (hb_len (it));
c->push ();
char *data_base = c->head;
+ it
| hb_map ([&] (const hb_pair_t<const DICTVAL&, const INFO&> &_)
{
@ -348,10 +392,16 @@ struct FDArray : CFFIndexOf<COUNT, FontDict>
})
| hb_sink (sizes)
;
unsigned data_size = c->head - data_base;
c->pop_pack (false);
if (unlikely (sizes.in_error ())) return_trace (false);
/* It just happens that the above is packed right after the header below.
* Such a hack. */
/* serialize INDEX header */
return_trace (CFFIndex<COUNT>::serialize_header (c, hb_iter (sizes)));
return_trace (CFFIndex<COUNT>::serialize_header (c, hb_iter (sizes), data_size));
}
};
@ -368,8 +418,11 @@ struct FDSelect0 {
return_trace (true);
}
hb_codepoint_t get_fd (hb_codepoint_t glyph) const
{ return (hb_codepoint_t) fds[glyph]; }
unsigned get_fd (hb_codepoint_t glyph) const
{ return fds[glyph]; }
hb_pair_t<unsigned, hb_codepoint_t> get_fd_range (hb_codepoint_t glyph) const
{ return {fds[glyph], glyph + 1}; }
unsigned int get_size (unsigned int num_glyphs) const
{ return HBUINT8::static_size * num_glyphs; }
@ -427,12 +480,20 @@ struct FDSelect3_4
return +1;
}
hb_codepoint_t get_fd (hb_codepoint_t glyph) const
unsigned get_fd (hb_codepoint_t glyph) const
{
auto *range = hb_bsearch (glyph, &ranges[0], nRanges () - 1, sizeof (ranges[0]), _cmp_range);
return range ? range->fd : ranges[nRanges () - 1].fd;
}
hb_pair_t<unsigned, hb_codepoint_t> get_fd_range (hb_codepoint_t glyph) const
{
auto *range = hb_bsearch (glyph, &ranges[0], nRanges () - 1, sizeof (ranges[0]), _cmp_range);
unsigned fd = range ? range->fd : ranges[nRanges () - 1].fd;
hb_codepoint_t end = range ? range[1].first : ranges[nRanges () - 1].first;
return {fd, end};
}
GID_TYPE &nRanges () { return ranges.len; }
GID_TYPE nRanges () const { return ranges.len; }
GID_TYPE &sentinel () { return StructAfter<GID_TYPE> (ranges[nRanges () - 1]); }
@ -469,7 +530,7 @@ struct FDSelect
}
}
hb_codepoint_t get_fd (hb_codepoint_t glyph) const
unsigned get_fd (hb_codepoint_t glyph) const
{
if (this == &Null (FDSelect)) return 0;
@ -480,6 +541,18 @@ struct FDSelect
default:return 0;
}
}
/* Returns pair of fd and one after last glyph in range. */
hb_pair_t<unsigned, hb_codepoint_t> get_fd_range (hb_codepoint_t glyph) const
{
if (this == &Null (FDSelect)) return {0, 1};
switch (format)
{
case 0: return u.format0.get_fd_range (glyph);
case 3: return u.format3.get_fd_range (glyph);
default:return {0, 1};
}
}
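
The new get_fd_range above returns the FD together with one past the last glyph of its run, so a caller can advance a whole run at a time instead of calling get_fd once per glyph. A hypothetical caller with stub data (not the real table types):

#include <cstdio>
#include <utility>

struct stub_fd_select
{
  // Glyphs 0..9 map to FD 0, glyphs 10..19 to FD 1.
  std::pair<unsigned, unsigned> get_fd_range (unsigned glyph) const
  { return glyph < 10 ? std::make_pair (0u, 10u) : std::make_pair (1u, 20u); }
};

int main ()
{
  stub_fd_select fds;
  const unsigned num_glyphs = 20;
  for (unsigned gid = 0; gid < num_glyphs; )
  {
    std::pair<unsigned, unsigned> range = fds.get_fd_range (gid);
    std::printf ("glyphs [%u, %u) use FD %u\n", gid, range.second, range.first);
    gid = range.second; // skip the whole run
  }
}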
bool sanitize (hb_sanitize_context_t *c, unsigned int fdcount) const
{

View File

@ -574,11 +574,11 @@ bool OT::cff1::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, h
struct get_seac_param_t
{
get_seac_param_t (const OT::cff1::accelerator_t *_cff) : cff (_cff) {}
get_seac_param_t (const OT::cff1::accelerator_subset_t *_cff) : cff (_cff) {}
bool has_seac () const { return base && accent; }
const OT::cff1::accelerator_t *cff;
const OT::cff1::accelerator_subset_t *cff;
hb_codepoint_t base = 0;
hb_codepoint_t accent = 0;
};
@ -596,7 +596,7 @@ struct cff1_cs_opset_seac_t : cff1_cs_opset_t<cff1_cs_opset_seac_t, get_seac_par
}
};
bool OT::cff1::accelerator_t::get_seac_components (hb_codepoint_t glyph, hb_codepoint_t *base, hb_codepoint_t *accent) const
bool OT::cff1::accelerator_subset_t::get_seac_components (hb_codepoint_t glyph, hb_codepoint_t *base, hb_codepoint_t *accent) const
{
if (unlikely (!is_valid () || (glyph >= num_glyphs))) return false;

View File

@ -28,7 +28,7 @@
#define HB_OT_CFF1_TABLE_HH
#include "hb-ot-cff-common.hh"
#include "hb-subset-cff1.hh"
#include "hb-subset-cff-common.hh"
#include "hb-draw.hh"
#include "hb-paint.hh"
@ -44,7 +44,7 @@ namespace CFF {
* CFF -- Compact Font Format (CFF)
* https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5176.CFF.pdf
*/
#define HB_OT_TAG_cff1 HB_TAG('C','F','F',' ')
#define HB_OT_TAG_CFF1 HB_TAG('C','F','F',' ')
#define CFF_UNDEF_SID CFF_UNDEF_CODE
@ -52,7 +52,6 @@ enum EncodingID { StandardEncoding = 0, ExpertEncoding = 1 };
enum CharsetID { ISOAdobeCharset = 0, ExpertCharset = 1, ExpertSubsetCharset = 2 };
typedef CFFIndex<HBUINT16> CFF1Index;
template <typename Type> struct CFF1IndexOf : CFFIndexOf<HBUINT16, Type> {};
typedef CFFIndex<HBUINT16> CFF1Index;
typedef CFF1Index CFF1CharStrings;
@ -110,6 +109,7 @@ struct Encoding1 {
hb_codepoint_t get_code (hb_codepoint_t glyph) const
{
/* TODO: Add cache like get_sid. */
assert (glyph > 0);
glyph--;
for (unsigned int i = 0; i < nRanges (); i++)
@ -173,11 +173,7 @@ struct Encoding
bool serialize (hb_serialize_context_t *c, const Encoding &src)
{
TRACE_SERIALIZE (this);
unsigned int size = src.get_size ();
Encoding *dest = c->allocate_size<Encoding> (size);
if (unlikely (!dest)) return_trace (false);
hb_memcpy (dest, &src, size);
return_trace (true);
return_trace (c->embed (src));
}
/* serialize a subset Encoding */
@ -312,26 +308,29 @@ struct Encoding
};
/* Charset */
struct Charset0 {
bool sanitize (hb_sanitize_context_t *c, unsigned int num_glyphs) const
struct Charset0
{
bool sanitize (hb_sanitize_context_t *c, unsigned int num_glyphs, unsigned *num_charset_entries) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && sids[num_glyphs - 1].sanitize (c));
if (num_charset_entries) *num_charset_entries = num_glyphs;
return_trace (sids.sanitize (c, num_glyphs - 1));
}
hb_codepoint_t get_sid (hb_codepoint_t glyph, unsigned num_glyphs) const
{
if (unlikely (glyph >= num_glyphs)) return 0;
if (glyph == 0)
if (unlikely (glyph == 0))
return 0;
else
return sids[glyph - 1];
}
void collect_glyph_to_sid_map (hb_map_t *mapping, unsigned int num_glyphs) const
void collect_glyph_to_sid_map (glyph_to_sid_map_t *mapping, unsigned int num_glyphs) const
{
mapping->resize (num_glyphs, false);
for (hb_codepoint_t gid = 1; gid < num_glyphs; gid++)
mapping->set (gid, sids[gid - 1]);
mapping->arrayZ[gid] = {sids[gid - 1], gid};
}
hb_codepoint_t get_glyph (hb_codepoint_t sid, unsigned int num_glyphs) const
@ -347,13 +346,13 @@ struct Charset0 {
return 0;
}
unsigned int get_size (unsigned int num_glyphs) const
static unsigned int get_size (unsigned int num_glyphs)
{
assert (num_glyphs > 0);
return HBUINT16::static_size * (num_glyphs - 1);
return UnsizedArrayOf<HBUINT16>::get_size (num_glyphs - 1);
}
HBUINT16 sids[HB_VAR_ARRAY];
UnsizedArrayOf<HBUINT16> sids;
DEFINE_SIZE_ARRAY(0, sids);
};
@ -374,38 +373,62 @@ struct Charset_Range {
template <typename TYPE>
struct Charset1_2 {
bool sanitize (hb_sanitize_context_t *c, unsigned int num_glyphs) const
bool sanitize (hb_sanitize_context_t *c, unsigned int num_glyphs, unsigned *num_charset_entries) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
return_trace (false);
num_glyphs--;
for (unsigned int i = 0; num_glyphs > 0; i++)
unsigned i;
for (i = 0; num_glyphs > 0; i++)
{
if (unlikely (!ranges[i].sanitize (c) || (num_glyphs < ranges[i].nLeft + 1)))
return_trace (false);
num_glyphs -= (ranges[i].nLeft + 1);
}
if (num_charset_entries)
*num_charset_entries = i;
return_trace (true);
}
hb_codepoint_t get_sid (hb_codepoint_t glyph, unsigned num_glyphs) const
hb_codepoint_t get_sid (hb_codepoint_t glyph, unsigned num_glyphs,
code_pair_t *cache = nullptr) const
{
if (unlikely (glyph >= num_glyphs)) return 0;
if (glyph == 0) return 0;
glyph--;
for (unsigned int i = 0;; i++)
unsigned i;
hb_codepoint_t start_glyph;
if (cache && likely (cache->glyph <= glyph))
{
if (glyph <= ranges[i].nLeft)
return (hb_codepoint_t) ranges[i].first + glyph;
glyph -= (ranges[i].nLeft + 1);
i = cache->code;
start_glyph = cache->glyph;
}
else
{
if (unlikely (glyph == 0)) return 0;
i = 0;
start_glyph = 1;
}
glyph -= start_glyph;
for (;; i++)
{
unsigned count = ranges[i].nLeft;
if (glyph <= count)
{
if (cache)
*cache = {i, start_glyph};
return ranges[i].first + glyph;
}
count++;
start_glyph += count;
glyph -= count;
}
return 0;
}
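
The cache parameter added to get_sid above remembers which range the previous lookup ended in and that range's first glyph, so a caller scanning glyphs in ascending order (like the glyph-name filler later in this file) pays amortized O(1) per lookup instead of rescanning from range 0. The resume pattern in isolation, with illustrative types:

#include <cstdio>

struct range_t { unsigned first_value; unsigned count; };
struct cache_t { unsigned range_idx; unsigned start_key; };

static unsigned lookup (const range_t *ranges, unsigned key, cache_t *c)
{
  unsigned i = 0, start = 0;
  if (c && c->start_key <= key) { i = c->range_idx; start = c->start_key; } // resume
  key -= start;
  for (;; i++) // sketch: assumes key falls inside some range
  {
    if (key < ranges[i].count)
    {
      if (c) { c->range_idx = i; c->start_key = start; }
      return ranges[i].first_value + key;
    }
    start += ranges[i].count;
    key -= ranges[i].count;
  }
}

int main ()
{
  const range_t ranges[] = {{100, 3}, {500, 4}, {900, 2}}; // keys 0..8
  cache_t c = {0, 0};
  for (unsigned k = 0; k < 9; k++)
    std::printf ("key %u -> %u\n", k, lookup (ranges, k, &c));
}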
void collect_glyph_to_sid_map (hb_map_t *mapping, unsigned int num_glyphs) const
void collect_glyph_to_sid_map (glyph_to_sid_map_t *mapping, unsigned int num_glyphs) const
{
mapping->resize (num_glyphs, false);
hb_codepoint_t gid = 1;
if (gid >= num_glyphs)
return;
@ -413,8 +436,9 @@ struct Charset1_2 {
{
hb_codepoint_t sid = ranges[i].first;
unsigned count = ranges[i].nLeft + 1;
unsigned last = gid + count;
for (unsigned j = 0; j < count; j++)
mapping->set (gid++, sid++);
mapping->arrayZ[gid++] = {sid++, last - 1};
if (gid >= num_glyphs)
break;
@ -439,21 +463,26 @@ struct Charset1_2 {
unsigned int get_size (unsigned int num_glyphs) const
{
unsigned int size = HBUINT8::static_size;
int glyph = (int)num_glyphs;
int glyph = (int) num_glyphs;
unsigned num_ranges = 0;
assert (glyph > 0);
glyph--;
for (unsigned int i = 0; glyph > 0; i++)
{
glyph -= (ranges[i].nLeft + 1);
size += Charset_Range<TYPE>::static_size;
num_ranges++;
}
return size;
return get_size_for_ranges (num_ranges);
}
Charset_Range<TYPE> ranges[HB_VAR_ARRAY];
static unsigned int get_size_for_ranges (unsigned int num_ranges)
{
return UnsizedArrayOf<Charset_Range<TYPE> >::get_size (num_ranges);
}
UnsizedArrayOf<Charset_Range<TYPE>> ranges;
DEFINE_SIZE_ARRAY (0, ranges);
};
@ -469,11 +498,7 @@ struct Charset
bool serialize (hb_serialize_context_t *c, const Charset &src, unsigned int num_glyphs)
{
TRACE_SERIALIZE (this);
unsigned int size = src.get_size (num_glyphs);
Charset *dest = c->allocate_size<Charset> (size);
if (unlikely (!dest)) return_trace (false);
hb_memcpy (dest, &src, size);
return_trace (true);
return_trace (c->embed ((const char *) &src, src.get_size (num_glyphs)));
}
/* serialize a subset Charset */
@ -490,13 +515,13 @@ struct Charset
{
case 0:
{
Charset0 *fmt0 = c->allocate_size<Charset0> (Charset0::min_size + HBUINT16::static_size * (num_glyphs - 1));
Charset0 *fmt0 = c->allocate_size<Charset0> (Charset0::get_size (num_glyphs), false);
if (unlikely (!fmt0)) return_trace (false);
unsigned int glyph = 0;
for (unsigned int i = 0; i < sid_ranges.length; i++)
{
hb_codepoint_t sid = sid_ranges[i].code;
for (int left = (int)sid_ranges[i].glyph; left >= 0; left--)
hb_codepoint_t sid = sid_ranges.arrayZ[i].code;
for (int left = (int)sid_ranges.arrayZ[i].glyph; left >= 0; left--)
fmt0->sids[glyph++] = sid++;
}
}
@ -504,29 +529,35 @@ struct Charset
case 1:
{
Charset1 *fmt1 = c->allocate_size<Charset1> (Charset1::min_size + Charset1_Range::static_size * sid_ranges.length);
Charset1 *fmt1 = c->allocate_size<Charset1> (Charset1::get_size_for_ranges (sid_ranges.length), false);
if (unlikely (!fmt1)) return_trace (false);
hb_codepoint_t all_glyphs = 0;
for (unsigned int i = 0; i < sid_ranges.length; i++)
{
if (unlikely (!(sid_ranges[i].glyph <= 0xFF)))
return_trace (false);
fmt1->ranges[i].first = sid_ranges[i].code;
fmt1->ranges[i].nLeft = sid_ranges[i].glyph;
auto &_ = sid_ranges.arrayZ[i];
all_glyphs |= _.glyph;
fmt1->ranges[i].first = _.code;
fmt1->ranges[i].nLeft = _.glyph;
}
if (unlikely (!(all_glyphs <= 0xFF)))
return_trace (false);
}
break;
case 2:
{
Charset2 *fmt2 = c->allocate_size<Charset2> (Charset2::min_size + Charset2_Range::static_size * sid_ranges.length);
Charset2 *fmt2 = c->allocate_size<Charset2> (Charset2::get_size_for_ranges (sid_ranges.length), false);
if (unlikely (!fmt2)) return_trace (false);
hb_codepoint_t all_glyphs = 0;
for (unsigned int i = 0; i < sid_ranges.length; i++)
{
if (unlikely (!(sid_ranges[i].glyph <= 0xFFFF)))
return_trace (false);
fmt2->ranges[i].first = sid_ranges[i].code;
fmt2->ranges[i].nLeft = sid_ranges[i].glyph;
auto &_ = sid_ranges.arrayZ[i];
all_glyphs |= _.glyph;
fmt2->ranges[i].first = _.code;
fmt2->ranges[i].nLeft = _.glyph;
}
if (unlikely (!(all_glyphs <= 0xFFFF)))
return_trace (false);
}
break;
@ -545,18 +576,19 @@ struct Charset
}
}
hb_codepoint_t get_sid (hb_codepoint_t glyph, unsigned int num_glyphs) const
hb_codepoint_t get_sid (hb_codepoint_t glyph, unsigned int num_glyphs,
code_pair_t *cache = nullptr) const
{
switch (format)
{
case 0: return u.format0.get_sid (glyph, num_glyphs);
case 1: return u.format1.get_sid (glyph, num_glyphs);
case 2: return u.format2.get_sid (glyph, num_glyphs);
case 1: return u.format1.get_sid (glyph, num_glyphs, cache);
case 2: return u.format2.get_sid (glyph, num_glyphs, cache);
default:return 0;
}
}
void collect_glyph_to_sid_map (hb_map_t *mapping, unsigned int num_glyphs) const
void collect_glyph_to_sid_map (glyph_to_sid_map_t *mapping, unsigned int num_glyphs) const
{
switch (format)
{
@ -578,7 +610,7 @@ struct Charset
}
}
bool sanitize (hb_sanitize_context_t *c) const
bool sanitize (hb_sanitize_context_t *c, unsigned *num_charset_entries) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
@ -586,9 +618,9 @@ struct Charset
switch (format)
{
case 0: return_trace (u.format0.sanitize (c, c->get_num_glyphs ()));
case 1: return_trace (u.format1.sanitize (c, c->get_num_glyphs ()));
case 2: return_trace (u.format2.sanitize (c, c->get_num_glyphs ()));
case 0: return_trace (u.format0.sanitize (c, c->get_num_glyphs (), num_charset_entries));
case 1: return_trace (u.format1.sanitize (c, c->get_num_glyphs (), num_charset_entries));
case 2: return_trace (u.format2.sanitize (c, c->get_num_glyphs (), num_charset_entries));
default:return_trace (false);
}
}
@ -606,10 +638,10 @@ struct Charset
struct CFF1StringIndex : CFF1Index
{
bool serialize (hb_serialize_context_t *c, const CFF1StringIndex &strings,
const hb_inc_bimap_t &sidmap)
const hb_vector_t<unsigned> &sidmap)
{
TRACE_SERIALIZE (this);
if (unlikely ((strings.count == 0) || (sidmap.get_population () == 0)))
if (unlikely ((strings.count == 0) || (sidmap.length == 0)))
{
if (unlikely (!c->extend_min (this->count)))
return_trace (false);
@ -617,15 +649,13 @@ struct CFF1StringIndex : CFF1Index
return_trace (true);
}
byte_str_array_t bytesArray;
if (!bytesArray.resize (sidmap.get_population ()))
return_trace (false);
for (unsigned int i = 0; i < strings.count; i++)
{
hb_codepoint_t j = sidmap[i];
if (j != HB_MAP_VALUE_INVALID)
bytesArray[j] = strings[i];
}
if (unlikely (sidmap.in_error ())) return_trace (false);
// Save this in a vector since serialize() iterates it twice.
hb_vector_t<hb_ubytes_t> bytesArray (+ hb_iter (sidmap)
| hb_map (strings));
if (unlikely (bytesArray.in_error ())) return_trace (false);
bool result = CFF1Index::serialize (c, bytesArray);
return_trace (result);
@ -932,7 +962,7 @@ struct cff1_private_dict_opset_t : dict_opset_t
}
};
struct cff1_private_dict_opset_subset : dict_opset_t
struct cff1_private_dict_opset_subset_t : dict_opset_t
{
static void process_op (op_code_t op, num_interp_env_t& env, cff1_private_dict_values_subset_t& dictval)
{
@ -978,7 +1008,7 @@ typedef dict_interpreter_t<cff1_top_dict_opset_t, cff1_top_dict_values_t, cff1_t
typedef dict_interpreter_t<cff1_font_dict_opset_t, cff1_font_dict_values_t> cff1_font_dict_interpreter_t;
typedef CFF1Index CFF1NameIndex;
typedef CFF1IndexOf<TopDict> CFF1TopDictIndex;
typedef CFF1Index CFF1TopDictIndex;
struct cff1_font_dict_values_mod_t
{
@ -1019,7 +1049,7 @@ using namespace CFF;
struct cff1
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_cff1;
static constexpr hb_tag_t tableTag = HB_OT_TAG_CFF1;
bool sanitize (hb_sanitize_context_t *c) const
{
@ -1031,8 +1061,12 @@ struct cff1
template <typename PRIVOPSET, typename PRIVDICTVAL>
struct accelerator_templ_t
{
void init (hb_face_t *face)
static constexpr hb_tag_t tableTag = cff1::tableTag;
accelerator_templ_t (hb_face_t *face)
{
if (!face) return;
topDict.init ();
fontDicts.init ();
privateDicts.init ();
@ -1046,22 +1080,22 @@ struct cff1
const OT::cff1 *cff = this->blob->template as<OT::cff1> ();
if (cff == &Null (OT::cff1))
{ fini (); return; }
goto fail;
nameIndex = &cff->nameIndex (cff);
if ((nameIndex == &Null (CFF1NameIndex)) || !nameIndex->sanitize (&sc))
{ fini (); return; }
goto fail;
topDictIndex = &StructAtOffset<CFF1TopDictIndex> (nameIndex, nameIndex->get_size ());
if ((topDictIndex == &Null (CFF1TopDictIndex)) || !topDictIndex->sanitize (&sc) || (topDictIndex->count == 0))
{ fini (); return; }
goto fail;
{ /* parse top dict */
const hb_ubytes_t topDictStr = (*topDictIndex)[0];
if (unlikely (!topDictStr.sanitize (&sc))) { fini (); return; }
if (unlikely (!topDictStr.sanitize (&sc))) goto fail;
cff1_top_dict_interp_env_t env (topDictStr);
cff1_top_dict_interpreter_t top_interp (env);
if (unlikely (!top_interp.interpret (topDict))) { fini (); return; }
if (unlikely (!top_interp.interpret (topDict))) goto fail;
}
if (is_predef_charset ())
@ -1069,7 +1103,7 @@ struct cff1
else
{
charset = &StructAtOffsetOrNull<Charset> (cff, topDict.CharsetOffset);
if (unlikely ((charset == &Null (Charset)) || !charset->sanitize (&sc))) { fini (); return; }
if (unlikely ((charset == &Null (Charset)) || !charset->sanitize (&sc, &num_charset_entries))) goto fail;
}
fdCount = 1;
@ -1079,7 +1113,7 @@ struct cff1
fdSelect = &StructAtOffsetOrNull<CFF1FDSelect> (cff, topDict.FDSelectOffset);
if (unlikely ((fdArray == &Null (CFF1FDArray)) || !fdArray->sanitize (&sc) ||
(fdSelect == &Null (CFF1FDSelect)) || !fdSelect->sanitize (&sc, fdArray->count)))
{ fini (); return; }
goto fail;
fdCount = fdArray->count;
}
@ -1092,36 +1126,36 @@ struct cff1
encoding = &Null (Encoding);
if (is_CID ())
{
if (unlikely (charset == &Null (Charset))) { fini (); return; }
if (unlikely (charset == &Null (Charset))) goto fail;
}
else
{
if (!is_predef_encoding ())
{
encoding = &StructAtOffsetOrNull<Encoding> (cff, topDict.EncodingOffset);
if (unlikely ((encoding == &Null (Encoding)) || !encoding->sanitize (&sc))) { fini (); return; }
if (unlikely ((encoding == &Null (Encoding)) || !encoding->sanitize (&sc))) goto fail;
}
}
stringIndex = &StructAtOffset<CFF1StringIndex> (topDictIndex, topDictIndex->get_size ());
if ((stringIndex == &Null (CFF1StringIndex)) || !stringIndex->sanitize (&sc))
{ fini (); return; }
goto fail;
globalSubrs = &StructAtOffset<CFF1Subrs> (stringIndex, stringIndex->get_size ());
if ((globalSubrs != &Null (CFF1Subrs)) && !globalSubrs->sanitize (&sc))
{ fini (); return; }
goto fail;
charStrings = &StructAtOffsetOrNull<CFF1CharStrings> (cff, topDict.charStringsOffset);
if ((charStrings == &Null (CFF1CharStrings)) || unlikely (!charStrings->sanitize (&sc)))
{ fini (); return; }
goto fail;
num_glyphs = charStrings->count;
if (num_glyphs != sc.get_num_glyphs ())
{ fini (); return; }
goto fail;
if (unlikely (!privateDicts.resize (fdCount)))
{ fini (); return; }
goto fail;
for (unsigned int i = 0; i < fdCount; i++)
privateDicts[i].init ();
@ -1131,27 +1165,27 @@ struct cff1
for (unsigned int i = 0; i < fdCount; i++)
{
hb_ubytes_t fontDictStr = (*fdArray)[i];
if (unlikely (!fontDictStr.sanitize (&sc))) { fini (); return; }
if (unlikely (!fontDictStr.sanitize (&sc))) goto fail;
cff1_font_dict_values_t *font;
cff1_top_dict_interp_env_t env (fontDictStr);
cff1_font_dict_interpreter_t font_interp (env);
font = fontDicts.push ();
if (unlikely (fontDicts.in_error ())) { fini (); return; }
if (unlikely (fontDicts.in_error ())) goto fail;
font->init ();
if (unlikely (!font_interp.interpret (*font))) { fini (); return; }
if (unlikely (!font_interp.interpret (*font))) goto fail;
PRIVDICTVAL *priv = &privateDicts[i];
const hb_ubytes_t privDictStr = StructAtOffset<UnsizedByteStr> (cff, font->privateDictInfo.offset).as_ubytes (font->privateDictInfo.size);
if (unlikely (!privDictStr.sanitize (&sc))) { fini (); return; }
if (unlikely (!privDictStr.sanitize (&sc))) goto fail;
num_interp_env_t env2 (privDictStr);
dict_interpreter_t<PRIVOPSET, PRIVDICTVAL> priv_interp (env2);
priv->init ();
if (unlikely (!priv_interp.interpret (*priv))) { fini (); return; }
if (unlikely (!priv_interp.interpret (*priv))) goto fail;
priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset);
if (priv->localSubrs != &Null (CFF1Subrs) &&
unlikely (!priv->localSubrs->sanitize (&sc)))
{ fini (); return; }
goto fail;
}
}
else /* non-CID */
@ -1160,20 +1194,25 @@ struct cff1
PRIVDICTVAL *priv = &privateDicts[0];
const hb_ubytes_t privDictStr = StructAtOffset<UnsizedByteStr> (cff, font->privateDictInfo.offset).as_ubytes (font->privateDictInfo.size);
if (unlikely (!privDictStr.sanitize (&sc))) { fini (); return; }
if (unlikely (!privDictStr.sanitize (&sc))) goto fail;
num_interp_env_t env (privDictStr);
dict_interpreter_t<PRIVOPSET, PRIVDICTVAL> priv_interp (env);
priv->init ();
if (unlikely (!priv_interp.interpret (*priv))) { fini (); return; }
if (unlikely (!priv_interp.interpret (*priv))) goto fail;
priv->localSubrs = &StructAtOffsetOrNull<CFF1Subrs> (&privDictStr, priv->subrsOffset);
if (priv->localSubrs != &Null (CFF1Subrs) &&
unlikely (!priv->localSubrs->sanitize (&sc)))
{ fini (); return; }
goto fail;
}
}
void fini ()
return;
fail:
_fini ();
}
~accelerator_templ_t () { _fini (); }
void _fini ()
{
sc.end_processing ();
topDict.fini ();
@ -1183,6 +1222,8 @@ struct cff1
blob = nullptr;
}
hb_blob_t *get_blob () const { return blob; }
bool is_valid () const { return blob; }
bool is_CID () const { return topDict.is_CID (); }
@ -1203,13 +1244,14 @@ struct cff1
bool is_predef_encoding () const { return topDict.EncodingOffset <= ExpertEncoding; }
hb_codepoint_t glyph_to_code (hb_codepoint_t glyph) const
hb_codepoint_t glyph_to_code (hb_codepoint_t glyph,
code_pair_t *glyph_to_sid_cache = nullptr) const
{
if (encoding != &Null (Encoding))
return encoding->get_code (glyph);
else
{
hb_codepoint_t sid = glyph_to_sid (glyph);
hb_codepoint_t sid = glyph_to_sid (glyph, glyph_to_sid_cache);
if (sid == 0) return 0;
hb_codepoint_t code = 0;
switch (topDict.EncodingOffset)
@ -1227,12 +1269,14 @@ struct cff1
}
}
hb_map_t *create_glyph_to_sid_map () const
glyph_to_sid_map_t *create_glyph_to_sid_map () const
{
if (charset != &Null (Charset))
{
hb_map_t *mapping = hb_map_create ();
mapping->set (0, 0);
auto *mapping = (glyph_to_sid_map_t *) hb_malloc (sizeof (glyph_to_sid_map_t));
if (unlikely (!mapping)) return nullptr;
mapping = new (mapping) glyph_to_sid_map_t ();
mapping->push (code_pair_t {0, 1});
charset->collect_glyph_to_sid_map (mapping, num_glyphs);
return mapping;
}
@ -1240,10 +1284,11 @@ struct cff1
return nullptr;
}
hb_codepoint_t glyph_to_sid (hb_codepoint_t glyph) const
hb_codepoint_t glyph_to_sid (hb_codepoint_t glyph,
code_pair_t *cache = nullptr) const
{
if (charset != &Null (Charset))
return charset->get_sid (glyph, num_glyphs);
return charset->get_sid (glyph, num_glyphs, cache);
else
{
hb_codepoint_t sid = 0;
@ -1312,19 +1357,17 @@ struct cff1
hb_vector_t<PRIVDICTVAL> privateDicts;
unsigned int num_glyphs = 0;
unsigned int num_charset_entries = 0;
};
struct accelerator_t : accelerator_templ_t<cff1_private_dict_opset_t, cff1_private_dict_values_t>
{
accelerator_t (hb_face_t *face)
accelerator_t (hb_face_t *face) : SUPER (face)
{
SUPER::init (face);
glyph_names.set_relaxed (nullptr);
if (!is_valid ()) return;
if (is_CID ()) return;
}
~accelerator_t ()
{
@ -1334,8 +1377,6 @@ struct cff1
names->fini ();
hb_free (names);
}
SUPER::fini ();
}
bool get_glyph_name (hb_codepoint_t glyph,
@ -1386,9 +1427,10 @@ struct cff1
/* TODO */
/* fill glyph names */
code_pair_t glyph_to_sid_cache {0, HB_CODEPOINT_INVALID};
for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++)
{
hb_codepoint_t sid = glyph_to_sid (gid);
hb_codepoint_t sid = glyph_to_sid (gid, &glyph_to_sid_cache);
gname_t gname;
gname.sid = sid;
if (sid < cff1_std_strings_length)
@ -1426,7 +1468,6 @@ struct cff1
HB_INTERNAL bool get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents) const;
HB_INTERNAL bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data, hb_color_t foreground) const;
HB_INTERNAL bool get_seac_components (hb_codepoint_t glyph, hb_codepoint_t *base, hb_codepoint_t *accent) const;
HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const;
private:
@ -1453,9 +1494,24 @@ struct cff1
typedef accelerator_templ_t<cff1_private_dict_opset_t, cff1_private_dict_values_t> SUPER;
};
struct accelerator_subset_t : accelerator_templ_t<cff1_private_dict_opset_subset, cff1_private_dict_values_subset_t> {};
struct accelerator_subset_t : accelerator_templ_t<cff1_private_dict_opset_subset_t, cff1_private_dict_values_subset_t>
{
accelerator_subset_t (hb_face_t *face) : SUPER (face) {}
~accelerator_subset_t ()
{
if (cff_accelerator)
cff_subset_accelerator_t::destroy (cff_accelerator);
}
bool subset (hb_subset_context_t *c) const { return hb_subset_cff1 (c); }
HB_INTERNAL bool subset (hb_subset_context_t *c) const;
HB_INTERNAL bool serialize (hb_serialize_context_t *c,
struct cff1_subset_plan &plan) const;
HB_INTERNAL bool get_seac_components (hb_codepoint_t glyph, hb_codepoint_t *base, hb_codepoint_t *accent) const;
mutable CFF::cff_subset_accelerator_t* cff_accelerator = nullptr;
typedef accelerator_templ_t<cff1_private_dict_opset_subset_t, cff1_private_dict_values_subset_t> SUPER;
};
protected:
HB_INTERNAL static hb_codepoint_t lookup_standard_encoding_for_code (hb_codepoint_t sid);
@ -1479,6 +1535,10 @@ struct cff1_accelerator_t : cff1::accelerator_t {
cff1_accelerator_t (hb_face_t *face) : cff1::accelerator_t (face) {}
};
struct cff1_subset_accelerator_t : cff1::accelerator_subset_t {
cff1_subset_accelerator_t (hb_face_t *face) : cff1::accelerator_subset_t (face) {}
};
} /* namespace OT */
#endif /* HB_OT_CFF1_TABLE_HH */

View File

@ -28,7 +28,7 @@
#define HB_OT_CFF2_TABLE_HH
#include "hb-ot-cff-common.hh"
#include "hb-subset-cff2.hh"
#include "hb-subset-cff-common.hh"
#include "hb-draw.hh"
#include "hb-paint.hh"
@ -38,10 +38,9 @@ namespace CFF {
* CFF2 -- Compact Font Format (CFF) Version 2
* https://docs.microsoft.com/en-us/typography/opentype/spec/cff2
*/
#define HB_OT_TAG_cff2 HB_TAG('C','F','F','2')
#define HB_OT_TAG_CFF2 HB_TAG('C','F','F','2')
typedef CFFIndex<HBUINT32> CFF2Index;
template <typename Type> struct CFF2IndexOf : CFFIndexOf<HBUINT32, Type> {};
typedef CFF2Index CFF2CharStrings;
typedef Subrs<HBUINT32> CFF2Subrs;
@ -379,7 +378,7 @@ using namespace CFF;
struct cff2
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_cff2;
static constexpr hb_tag_t tableTag = HB_OT_TAG_CFF2;
bool sanitize (hb_sanitize_context_t *c) const
{
@ -391,8 +390,12 @@ struct cff2
template <typename PRIVOPSET, typename PRIVDICTVAL>
struct accelerator_templ_t
{
static constexpr hb_tag_t tableTag = cff2::tableTag;
accelerator_templ_t (hb_face_t *face)
{
if (!face) return;
topDict.init ();
fontDicts.init ();
privateDicts.init ();
@ -464,7 +467,6 @@ struct cff2
goto fail;
}
return;
fail:
@ -481,11 +483,13 @@ struct cff2
blob = nullptr;
}
hb_map_t *create_glyph_to_sid_map () const
hb_vector_t<uint16_t> *create_glyph_to_sid_map () const
{
return nullptr;
}
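create_glyph_to_sid_map still returns nullptr here, since CFF2 has no charset/SID machinery; the interesting part is the signature change from a hash map (hb_map_t) to a dense vector (hb_vector_t<uint16_t>). For a mapping that is total over [0, num_glyphs), a gid-indexed array is smaller and faster than hashing. A sketch with STL stand-ins; the fill is a placeholder, the real data would come from a charset:

// Illustrative sketch; std::vector stands in for hb_vector_t.
#include <cstdint>
#include <vector>

static std::vector<uint16_t> *create_glyph_to_sid_map (unsigned num_glyphs)
{
  auto *map = new std::vector<uint16_t> (num_glyphs, 0);
  for (unsigned gid = 0; gid < num_glyphs; gid++)
    (*map)[gid] = (uint16_t) gid;   // placeholder; real sids come from the charset
  return map;
}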
hb_blob_t *get_blob () const { return blob; }
bool is_valid () const { return blob; }
protected:
@ -518,9 +522,24 @@ struct cff2
HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const;
};
typedef accelerator_templ_t<cff2_private_dict_opset_subset_t, cff2_private_dict_values_subset_t> accelerator_subset_t;
struct accelerator_subset_t : accelerator_templ_t<cff2_private_dict_opset_subset_t, cff2_private_dict_values_subset_t>
{
accelerator_subset_t (hb_face_t *face) : SUPER (face) {}
~accelerator_subset_t ()
{
if (cff_accelerator)
cff_subset_accelerator_t::destroy (cff_accelerator);
}
bool subset (hb_subset_context_t *c) const { return hb_subset_cff2 (c); }
HB_INTERNAL bool subset (hb_subset_context_t *c) const;
HB_INTERNAL bool serialize (hb_serialize_context_t *c,
struct cff2_subset_plan &plan,
hb_array_t<int> normalized_coords) const;
mutable CFF::cff_subset_accelerator_t* cff_accelerator = nullptr;
typedef accelerator_templ_t<cff2_private_dict_opset_subset_t, cff2_private_dict_values_subset_t> SUPER;
};
public:
FixedVersion<HBUINT8> version; /* Version of CFF2 table. set to 0x0200u */
@ -535,6 +554,10 @@ struct cff2_accelerator_t : cff2::accelerator_t {
cff2_accelerator_t (hb_face_t *face) : cff2::accelerator_t (face) {}
};
struct cff2_subset_accelerator_t : cff2::accelerator_subset_t {
cff2_subset_accelerator_t (hb_face_t *face) : cff2::accelerator_subset_t (face) {}
};
} /* namespace OT */
#endif /* HB_OT_CFF2_TABLE_HH */
@ -277,10 +277,10 @@ struct CmapSubtableFormat4
}
} writer(c);
writer.end_code_ = c->allocate_size<HBUINT16> (HBUINT16::static_size * segcount);
c->allocate_size<HBUINT16> (2); // padding
writer.start_code_ = c->allocate_size<HBUINT16> (HBUINT16::static_size * segcount);
writer.id_delta_ = c->allocate_size<HBINT16> (HBINT16::static_size * segcount);
writer.end_code_ = c->allocate_size<HBUINT16> (HBUINT16::static_size * segcount, false);
(void) c->allocate_size<HBUINT16> (2); // padding
writer.start_code_ = c->allocate_size<HBUINT16> (HBUINT16::static_size * segcount, false);
writer.id_delta_ = c->allocate_size<HBINT16> (HBINT16::static_size * segcount, false);
if (unlikely (!writer.end_code_ || !writer.start_code_ || !writer.id_delta_)) return false;
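The segment arrays gain a second argument to allocate_size, which by all appearances in this diff disables the default zero fill when the caller is about to overwrite every byte anyway; the two padding bytes keep the default and stay zeroed, and the (void) cast documents that the padding pointer is deliberately unused. A sketch of an arena with such a flag, under the assumption that the bool means "clear the returned bytes":

// Illustrative sketch of allocate-without-clear; invented arena, assumed flag meaning.
#include <cstddef>
#include <cstring>

struct arena_t
{
  char *buf = nullptr;
  size_t used = 0, cap = 0;

  void *allocate_size (size_t size, bool clear = true)
  {
    if (used + size > cap) return nullptr;
    void *p = buf + used;
    used += size;
    if (clear)
      memset (p, 0, size);   // skipped when the caller fills every byte itself
    return p;
  }
};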
@ -325,7 +325,7 @@ struct CmapSubtableFormat4
{
auto format4_iter =
+ it
| hb_filter ([&] (const hb_pair_t<hb_codepoint_t, hb_codepoint_t> _)
| hb_filter ([&] (const hb_codepoint_pair_t _)
{ return _.first <= 0xFFFF; })
;
@ -335,7 +335,7 @@ struct CmapSubtableFormat4
if (unlikely (!c->extend_min (this))) return;
this->format = 4;
hb_vector_t<hb_pair_t<hb_codepoint_t, hb_codepoint_t>> cp_to_gid {
hb_vector_t<hb_codepoint_pair_t> cp_to_gid {
format4_iter
};
@ -757,8 +757,7 @@ struct CmapSubtableLongSegmented
hb_codepoint_t gid = this->groups[i].glyphID;
if (!gid)
{
/* Intention is: if (hb_is_same (T, CmapSubtableFormat13)) continue; */
if (! T::group_get_glyph (this->groups[i], end)) continue;
if (T::formatNumber == 13) continue;
start++;
gid++;
}
@ -766,11 +765,13 @@ struct CmapSubtableLongSegmented
if (unlikely ((unsigned int) (gid + end - start) >= num_glyphs))
end = start + (hb_codepoint_t) num_glyphs - gid;
mapping->alloc (mapping->get_population () + end - start + 1);
for (unsigned cp = start; cp <= end; cp++)
{
unicodes->add (cp);
mapping->set (cp, gid);
gid++;
gid += T::increment;
}
}
}
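Pre-sizing the map once before the per-codepoint loop avoids repeated grow/rehash work inside it, and gid += T::increment folds the two subtable behaviours into one loop body. A sketch with an STL stand-in for hb_map_t:

// Illustrative sketch; std::unordered_map stands in for hb_map_t.
#include <cstdint>
#include <unordered_map>

static void add_group (std::unordered_map<uint32_t, uint32_t> &mapping,
                       uint32_t start, uint32_t end,
                       uint32_t gid, uint32_t increment)
{
  // One reservation instead of incremental growth inside the hot loop.
  mapping.reserve (mapping.size () + (end - start + 1));
  for (uint32_t cp = start; cp <= end; cp++)
  {
    mapping.emplace (cp, gid);
    gid += increment;  // 1: consecutive gids (format 12); 0: same gid for all (format 13)
  }
}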
@ -794,6 +795,9 @@ struct CmapSubtableLongSegmented
struct CmapSubtableFormat12 : CmapSubtableLongSegmented<CmapSubtableFormat12>
{
static constexpr int increment = 1;
static constexpr int formatNumber = 12;
static hb_codepoint_t group_get_glyph (const CmapSubtableLongGroup &group,
hb_codepoint_t u)
{ return likely (group.startCharCode <= group.endCharCode) ?
@ -866,6 +870,9 @@ struct CmapSubtableFormat12 : CmapSubtableLongSegmented<CmapSubtableFormat12>
struct CmapSubtableFormat13 : CmapSubtableLongSegmented<CmapSubtableFormat13>
{
static constexpr int increment = 0;
static constexpr int formatNumber = 13;
static hb_codepoint_t group_get_glyph (const CmapSubtableLongGroup &group,
hb_codepoint_t u HB_UNUSED)
{ return group.glyphID; }
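The formatNumber and increment constants replace the old runtime probe (and the "Intention is" comment hack above): each instantiation of the shared template sees compile-time constants, so the format-13 branch folds away entirely for format 12. A simplified sketch, not the real subtable types:

// Illustrative sketch; Format12/Format13 are stand-ins for the subtables.
struct Format12 { static constexpr int formatNumber = 12; static constexpr int increment = 1; };
struct Format13 { static constexpr int formatNumber = 13; static constexpr int increment = 0; };

template <typename T>
static unsigned count_mapped_groups (const unsigned *group_start_gids, unsigned n)
{
  unsigned count = 0;
  for (unsigned i = 0; i < n; i++)
  {
    if (!group_start_gids[i])
    {
      // Constant-folded per instantiation: a group mapping to glyph 0 is
      // dropped for Format13 but kept (adjusted) for Format12.
      if (T::formatNumber == 13) continue;
    }
    count++;
  }
  return count;
}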
@ -917,8 +924,7 @@ struct DefaultUVS : SortedArray32Of<UnicodeValueRange>
DefaultUVS* copy (hb_serialize_context_t *c,
const hb_set_t *unicodes) const
{
DefaultUVS *out = c->start_embed<DefaultUVS> ();
if (unlikely (!out)) return nullptr;
auto *out = c->start_embed<DefaultUVS> ();
auto snap = c->snapshot ();
HBUINT32 len;
@ -931,8 +937,7 @@ struct DefaultUVS : SortedArray32Of<UnicodeValueRange>
hb_codepoint_t start = HB_SET_VALUE_INVALID;
hb_codepoint_t end = HB_SET_VALUE_INVALID;
for (hb_codepoint_t u = HB_SET_VALUE_INVALID;
unicodes->next (&u);)
for (auto u : *unicodes)
{
if (!as_array ().bsearch (u))
continue;
@ -1067,9 +1072,7 @@ struct NonDefaultUVS : SortedArray32Of<UVSMapping>
const hb_set_t *glyphs_requested,
const hb_map_t *glyph_map) const
{
NonDefaultUVS *out = c->start_embed<NonDefaultUVS> ();
if (unlikely (!out)) return nullptr;
auto *out = c->start_embed<NonDefaultUVS> ();
auto it =
+ as_array ()
| hb_filter ([&] (const UVSMapping& _)
@ -1767,7 +1770,6 @@ struct cmap
TRACE_SUBSET (this);
cmap *cmap_prime = c->serializer->start_embed<cmap> ();
if (unlikely (!c->serializer->check_success (cmap_prime))) return_trace (false);
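Several hunks in this file drop the if (unlikely (!out)) return nullptr; guard right after start_embed. The pattern here suggests that start_embed only computes the write cursor and cannot fail on its own; out-of-space is recorded in the serializer's sticky error state by the actual allocation (extend_min and friends) and tested centrally, as with check_success above. A sketch of that error model, with invented names:

// Illustrative sketch of a sticky-error serializer; names are invented.
#include <cstddef>

struct serializer_t
{
  char *head = nullptr, *tail = nullptr;
  bool in_error = false;

  template <typename T>
  T *start_embed () { return reinterpret_cast<T *> (head); } // cursor only, no failure path

  bool extend_min (size_t size)
  {
    if (in_error || size > (size_t) (tail - head)) { in_error = true; return false; }
    head += size;
    return true;
  }

  bool check_success (bool ok)
  { if (!ok) in_error = true; return !in_error; } // one central error test
};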
auto encodingrec_iter =
+ hb_iter (encodingRecord)
@ -1798,7 +1800,7 @@ struct cmap
auto it =
+ c->plan->unicode_to_new_gid_list.iter ()
| hb_filter ([&] (const hb_pair_t<hb_codepoint_t, hb_codepoint_t> _)
| hb_filter ([&] (const hb_codepoint_pair_t _)
{ return (_.second != HB_MAP_VALUE_INVALID); })
;
Some files were not shown because too many files have changed in this diff.