8301998: Update HarfBuzz to 7.0.1

Reviewed-by: erikj, prr
Harshitha Onkar 2023-03-14 20:18:44 +00:00
parent 617c15f5a1
commit f5c8b68c1c
267 changed files with 28939 additions and 15638 deletions

View File

@@ -451,7 +451,7 @@ else
-DHB_NO_PRAGMA_GCC_DIAGNOSTIC
endif
ifeq ($(call isTargetOs, linux macosx), true)
HARFBUZZ_CFLAGS += -DHAVE_INTEL_ATOMIC_PRIMITIVES
HARFBUZZ_CFLAGS += -DHAVE_INTEL_ATOMIC_PRIMITIVES -DHB_NO_VISIBILITY
endif
# Early re-canonizing has to be disabled to workaround an internal XlC compiler error
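
Note on the new define: the JDK folds HarfBuzz directly into libfontmanager rather than building it as a standalone shared library, so hidden-visibility annotations on HarfBuzz internals buy nothing there. A simplified sketch of what `HB_NO_VISIBILITY` controls in `hb.hh` (an assumption about the upstream macro layout; the real header has additional compiler and platform branches):

```cpp
// Sketch (assumption) of the hb.hh visibility logic that -DHB_NO_VISIBILITY
// short-circuits: with the flag defined, internal symbols carry no visibility
// attribute, which suits a build that statically embeds HarfBuzz.
#ifdef HB_NO_VISIBILITY
#define HB_INTERNAL
#else
#define HB_INTERNAL __attribute__((__visibility__("hidden")))
#endif
```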

View File

@@ -1,8 +1,8 @@
## Harfbuzz v4.4.1
## Harfbuzz v7.0.1
### Harfbuzz License
https://github.com/harfbuzz/harfbuzz/blob/4.4.1/COPYING
https://github.com/harfbuzz/harfbuzz/blob/7.0.1/COPYING
<pre>
@@ -12,21 +12,22 @@ files names COPYING in subdirectories where applicable.
Copyright © 2010-2022 Google, Inc.
Copyright © 2018-2020 Ebrahim Byagowi
Copyright © 2019-2020 Facebook, Inc.
Copyright © 2012-2015 Mozilla Foundation.
Copyright © 2011 Codethink Limited
Copyright © 2008-2010 Nokia Corporation and/or its subsidiary(-ies)
Copyright © 2009 Keith Stribley
Copyright © 2009 Martin Hosken and SIL International
Copyright © 2007 Chris Wilson
Copyright © 2005-2022 Behdad Esfahbod
Copyright © 2005 David Turner
Copyright © 2004-2013 Red Hat, Inc.
Copyright © 1998-2004 David Turner and Werner Lemberg
Copyright © 2016 Elie Roux <elie.roux@telecom-bretagne.eu>
Copyright © 2019 Facebook, Inc.
Copyright © 2007 Chris Wilson
Copyright © 2018-2019 Adobe Inc.
Copyright © 2006-2023 Behdad Esfahbod
Copyright © 1998-2004 David Turner and Werner Lemberg
Copyright © 2009 Keith Stribley
Copyright © 2018 Khaled Hosny
Copyright © 2016 Elie Roux <elie.roux@telecom-bretagne.eu>
Copyright © 2016 Igalia S.L.
Copyright © 2015 Mozilla Foundation.
Copyright © 1999 David Turner
Copyright © 2005 Werner Lemberg
Copyright © 2013-2015 Alexei Podtelezhnikov
Copyright © 2022 Matthias Clasen
Copyright © 2011 Codethink Limited
For full copyright notices consult the individual files in the package.
@@ -72,3 +73,23 @@ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
</pre>
### AUTHORS File Information
```
Behdad Esfahbod
David Corbett
David Turner
Ebrahim Byagowi
Garret Rieger
Jonathan Kew
Khaled Hosny
Lars Knoll
Martin Hosken
Owen Taylor
Roderick Sheeter
Roozbeh Pournader
Simon Hausmann
Werner Lemberg
```

View File

@@ -24,10 +24,11 @@
* Google Author(s): Seigo Nonaka, Calder Kitagawa
*/
#ifndef HB_OT_COLOR_CBDT_TABLE_HH
#define HB_OT_COLOR_CBDT_TABLE_HH
#ifndef OT_COLOR_CBDT_CBDT_HH
#define OT_COLOR_CBDT_CBDT_HH
#include "hb-open-type.hh"
#include "../../../hb-open-type.hh"
#include "../../../hb-paint.hh"
/*
* CBLC -- Color Bitmap Location
@@ -67,7 +68,7 @@ _copy_data_to_cbdt (hb_vector_t<char> *cbdt_prime,
{
unsigned int new_len = cbdt_prime->length + length;
if (unlikely (!cbdt_prime->alloc (new_len))) return false;
memcpy (cbdt_prime->arrayZ + cbdt_prime->length, data, length);
hb_memcpy (cbdt_prime->arrayZ + cbdt_prime->length, data, length);
cbdt_prime->length = new_len;
return true;
}
@@ -80,12 +81,15 @@ struct SmallGlyphMetrics
return_trace (c->check_struct (this));
}
void get_extents (hb_font_t *font, hb_glyph_extents_t *extents) const
void get_extents (hb_font_t *font, hb_glyph_extents_t *extents, bool scale) const
{
extents->x_bearing = font->em_scale_x (bearingX);
extents->y_bearing = font->em_scale_y (bearingY);
extents->width = font->em_scale_x (width);
extents->height = font->em_scale_y (-static_cast<int>(height));
extents->x_bearing = bearingX;
extents->y_bearing = bearingY;
extents->width = width;
extents->height = -static_cast<int> (height);
if (scale)
font->scale_glyph_extents (extents);
}
HBUINT8 height;
@@ -307,7 +311,7 @@ struct IndexSubtable
}
}
bool get_extents (hb_glyph_extents_t *extents HB_UNUSED) const
bool get_extents (hb_glyph_extents_t *extents HB_UNUSED, bool scale HB_UNUSED) const
{
switch (u.header.indexFormat)
{
@@ -468,13 +472,13 @@ struct IndexSubtableRecord
if (unlikely (!c->serializer->check_success (records->resize (records->length + 1))))
return_trace (false);
(*records)[records->length - 1].firstGlyphIndex = 1;
(*records)[records->length - 1].lastGlyphIndex = 0;
records->tail ().firstGlyphIndex = 1;
records->tail ().lastGlyphIndex = 0;
bitmap_size_context->size += IndexSubtableRecord::min_size;
c->serializer->push ();
if (unlikely (!add_new_subtable (c, bitmap_size_context, &((*records)[records->length - 1]), lookup, base, start)))
if (unlikely (!add_new_subtable (c, bitmap_size_context, &(records->tail ()), lookup, base, start)))
{
c->serializer->pop_discard ();
c->serializer->revert (snap);
@@ -504,8 +508,8 @@ struct IndexSubtableRecord
return num_missing;
}
bool get_extents (hb_glyph_extents_t *extents, const void *base) const
{ return (base+offsetToSubtable).get_extents (extents); }
bool get_extents (hb_glyph_extents_t *extents, const void *base, bool scale) const
{ return (base+offsetToSubtable).get_extents (extents, scale); }
bool get_image_data (unsigned int gid,
const void *base,
@@ -833,7 +837,7 @@ struct CBDT
}
bool
get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents) const
get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents, bool scale = true) const
{
const void *base;
const BitmapSizeTable &strike = this->cblc->choose_strike (font);
@@ -841,7 +845,7 @@ struct CBDT
if (!subtable_record || !strike.ppemX || !strike.ppemY)
return false;
if (subtable_record->get_extents (extents, base))
if (subtable_record->get_extents (extents, base, scale))
return true;
unsigned int image_offset = 0, image_length = 0, image_format = 0;
@@ -858,26 +862,29 @@
if (unlikely (image_length < GlyphBitmapDataFormat17::min_size))
return false;
auto &glyphFormat17 = StructAtOffset<GlyphBitmapDataFormat17> (this->cbdt, image_offset);
glyphFormat17.glyphMetrics.get_extents (font, extents);
glyphFormat17.glyphMetrics.get_extents (font, extents, scale);
break;
}
case 18: {
if (unlikely (image_length < GlyphBitmapDataFormat18::min_size))
return false;
auto &glyphFormat18 = StructAtOffset<GlyphBitmapDataFormat18> (this->cbdt, image_offset);
glyphFormat18.glyphMetrics.get_extents (font, extents);
glyphFormat18.glyphMetrics.get_extents (font, extents, scale);
break;
}
default: return false; /* TODO: Support other image formats. */
}
/* Convert to font units. */
float x_scale = upem / (float) strike.ppemX;
float y_scale = upem / (float) strike.ppemY;
extents->x_bearing = roundf (extents->x_bearing * x_scale);
extents->y_bearing = roundf (extents->y_bearing * y_scale);
extents->width = roundf (extents->width * x_scale);
extents->height = roundf (extents->height * y_scale);
if (scale)
{
float x_scale = upem / (float) strike.ppemX;
float y_scale = upem / (float) strike.ppemY;
extents->x_bearing = roundf (extents->x_bearing * x_scale);
extents->y_bearing = roundf (extents->y_bearing * y_scale);
extents->width = roundf (extents->width * x_scale);
extents->height = roundf (extents->height * y_scale);
}
return true;
}
@@ -934,6 +941,32 @@ struct CBDT
bool has_data () const { return cbdt.get_length (); }
bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
hb_glyph_extents_t extents;
hb_glyph_extents_t pixel_extents;
hb_blob_t *blob = reference_png (font, glyph);
if (unlikely (blob == hb_blob_get_empty ()))
return false;
if (unlikely (!hb_font_get_glyph_extents (font, glyph, &extents)))
return false;
if (unlikely (!get_extents (font, glyph, &pixel_extents, false)))
return false;
bool ret = funcs->image (data,
blob,
pixel_extents.width, -pixel_extents.height,
HB_PAINT_IMAGE_FORMAT_PNG,
font->slant_xy,
&extents);
hb_blob_destroy (blob);
return ret;
}
private:
hb_blob_ptr_t<CBLC> cblc;
hb_blob_ptr_t<CBDT> cbdt;
@@ -994,4 +1027,4 @@ struct CBDT_accelerator_t : CBDT::accelerator_t {
} /* namespace OT */
#endif /* HB_OT_COLOR_CBDT_TABLE_HH */
#endif /* OT_COLOR_CBDT_CBDT_HH */
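
The `paint_glyph` additions here (and the parallel ones in sbix and SVG below) feed the hb-paint API that is new in HarfBuzz 7: the PNG blob is reported at its pixel dimensions, taken from the unscaled (`scale = false`) extents, while `extents` carries the scaled glyph box used for placement. A minimal client-side sketch, assuming the HarfBuzz 7 public entry points (`hb_paint_funcs_create`, `hb_paint_funcs_set_image_func`, `hb_font_paint_glyph`) and eliding error handling:

```cpp
#include <cstdio>
#include <hb.h>

// Image callback matching hb_paint_image_func_t: width/height arrive in
// pixels; extents (may be nullptr, e.g. for SVG) are in scaled font units.
static hb_bool_t
dump_image (hb_paint_funcs_t *, void *, hb_blob_t *image,
            unsigned width, unsigned height, hb_tag_t format,
            float /* slant */, hb_glyph_extents_t *extents, void *)
{
  unsigned len = 0;
  hb_blob_get_data (image, &len);
  printf ("%u-byte %c%c%c%c image, %ux%u px, bearing %d,%d\n",
          len, HB_UNTAG (format), width, height,
          extents ? extents->x_bearing : 0,
          extents ? extents->y_bearing : 0);
  return true;
}

void paint_one (hb_font_t *font, hb_codepoint_t glyph)
{
  hb_paint_funcs_t *funcs = hb_paint_funcs_create ();
  hb_paint_funcs_set_image_func (funcs, dump_image, nullptr, nullptr);
  hb_font_paint_glyph (font, glyph, funcs, nullptr,
                       0 /* palette index */, HB_COLOR (0, 0, 0, 255));
  hb_paint_funcs_destroy (funcs);
}
```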

View File

@@ -24,12 +24,11 @@
*
*/
#ifndef HB_OT_COLR_COLRV1_CLOSURE_HH
#define HB_OT_COLR_COLRV1_CLOSURE_HH
#ifndef OT_COLOR_COLR_COLRV1_CLOSURE_HH
#define OT_COLOR_COLR_COLRV1_CLOSURE_HH
#include "hb-open-type.hh"
#include "hb-ot-layout-common.hh"
#include "hb-ot-color-colr-table.hh"
#include "../../../hb-open-type.hh"
#include "COLR.hh"
/*
* COLR -- Color
@@ -105,4 +104,4 @@ HB_INTERNAL void PaintComposite::closurev1 (hb_colrv1_closure_context_t* c) cons
} /* namespace OT */
#endif /* HB_OT_COLR_COLRV1_CLOSURE_HH */
#endif /* OT_COLOR_COLR_COLRV1_CLOSURE_HH */

View File

@@ -25,12 +25,12 @@
* Google Author(s): Sascha Brawer
*/
#ifndef HB_OT_COLOR_CPAL_TABLE_HH
#define HB_OT_COLOR_CPAL_TABLE_HH
#ifndef OT_COLOR_CPAL_CPAL_HH
#define OT_COLOR_CPAL_CPAL_HH
#include "hb-open-type.hh"
#include "hb-ot-color.h"
#include "hb-ot-name.h"
#include "../../../hb-open-type.hh"
#include "../../../hb-ot-color.h"
#include "../../../hb-ot-name.h"
/*
@@ -239,7 +239,7 @@ struct CPAL
TRACE_SUBSET (this);
if (!numPalettes) return_trace (false);
const hb_map_t *color_index_map = c->plan->colr_palettes;
const hb_map_t *color_index_map = &c->plan->colr_palettes;
if (color_index_map->is_empty ()) return_trace (false);
hb_set_t retained_color_indices;
@@ -319,4 +319,4 @@ struct CPAL
} /* namespace OT */
#endif /* HB_OT_COLOR_CPAL_TABLE_HH */
#endif /* OT_COLOR_CPAL_CPAL_HH */

View File

@@ -25,11 +25,11 @@
* Google Author(s): Calder Kitagawa
*/
#ifndef HB_OT_COLOR_SBIX_TABLE_HH
#define HB_OT_COLOR_SBIX_TABLE_HH
#ifndef OT_COLOR_SBIX_SBIX_HH
#define OT_COLOR_SBIX_SBIX_HH
#include "hb-open-type.hh"
#include "hb-ot-layout-common.hh"
#include "../../../hb-open-type.hh"
#include "../../../hb-paint.hh"
/*
* sbix -- Standard Bitmap Graphics
@@ -213,10 +213,11 @@ struct sbix
bool get_extents (hb_font_t *font,
hb_codepoint_t glyph,
hb_glyph_extents_t *extents) const
hb_glyph_extents_t *extents,
bool scale = true) const
{
/* We only support PNG right now, and following function checks type. */
return get_png_extents (font, glyph, extents);
return get_png_extents (font, glyph, extents, scale);
}
hb_blob_t *reference_png (hb_font_t *font,
@@ -231,6 +232,37 @@ struct sbix
num_glyphs, available_ppem);
}
bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
if (!has_data ())
return false;
int x_offset = 0, y_offset = 0;
unsigned int strike_ppem = 0;
hb_blob_t *blob = reference_png (font, glyph, &x_offset, &y_offset, &strike_ppem);
hb_glyph_extents_t extents;
hb_glyph_extents_t pixel_extents;
if (blob == hb_blob_get_empty ())
return false;
if (!hb_font_get_glyph_extents (font, glyph, &extents))
return false;
if (unlikely (!get_extents (font, glyph, &pixel_extents, false)))
return false;
bool ret = funcs->image (data,
blob,
pixel_extents.width, -pixel_extents.height,
HB_PAINT_IMAGE_FORMAT_PNG,
font->slant_xy,
&extents);
hb_blob_destroy (blob);
return ret;
}
private:
const SBIXStrike &choose_strike (hb_font_t *font) const
@@ -285,7 +317,8 @@ struct sbix
bool get_png_extents (hb_font_t *font,
hb_codepoint_t glyph,
hb_glyph_extents_t *extents) const
hb_glyph_extents_t *extents,
bool scale = true) const
{
/* Following code is safe to call even without data.
* But faster to short-circuit. */
@@ -310,22 +343,18 @@ struct sbix
extents->height = -1 * png.IHDR.height;
/* Convert to font units. */
if (strike_ppem)
if (strike_ppem && scale)
{
float scale = font->face->get_upem () / (float) strike_ppem;
extents->x_bearing = font->em_scalef_x (extents->x_bearing * scale);
extents->y_bearing = font->em_scalef_y (extents->y_bearing * scale);
extents->width = font->em_scalef_x (extents->width * scale);
extents->height = font->em_scalef_y (extents->height * scale);
}
else
{
extents->x_bearing = font->em_scale_x (extents->x_bearing);
extents->y_bearing = font->em_scale_y (extents->y_bearing);
extents->width = font->em_scale_x (extents->width);
extents->height = font->em_scale_y (extents->height);
extents->x_bearing = roundf (extents->x_bearing * scale);
extents->y_bearing = roundf (extents->y_bearing * scale);
extents->width = roundf (extents->width * scale);
extents->height = roundf (extents->height * scale);
}
if (scale)
font->scale_glyph_extents (extents);
hb_blob_destroy (blob);
return strike_ppem;
@@ -420,4 +449,4 @@ struct sbix_accelerator_t : sbix::accelerator_t {
} /* namespace OT */
#endif /* HB_OT_COLOR_SBIX_TABLE_HH */
#endif /* OT_COLOR_SBIX_SBIX_HH */

View File

@@ -22,10 +22,12 @@
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_OT_COLOR_SVG_TABLE_HH
#define HB_OT_COLOR_SVG_TABLE_HH
#ifndef OT_COLOR_SVG_SVG_HH
#define OT_COLOR_SVG_SVG_HH
#include "hb-open-type.hh"
#include "../../../hb-open-type.hh"
#include "../../../hb-blob.hh"
#include "../../../hb-paint.hh"
/*
* SVG -- SVG (Scalable Vector Graphics)
@@ -91,8 +93,31 @@ struct SVG
bool has_data () const { return table->has_data (); }
bool paint_glyph (hb_font_t *font HB_UNUSED, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
if (!has_data ())
return false;
hb_blob_t *blob = reference_blob_for_glyph (glyph);
if (blob == hb_blob_get_empty ())
return false;
funcs->image (data,
blob,
0, 0,
HB_PAINT_IMAGE_FORMAT_SVG,
font->slant_xy,
nullptr);
hb_blob_destroy (blob);
return true;
}
private:
hb_blob_ptr_t<SVG> table;
public:
DEFINE_SIZE_STATIC (sizeof (hb_blob_ptr_t<SVG>));
};
const SVGDocumentIndexEntry &get_glyph_entry (hb_codepoint_t glyph_id) const
@@ -123,4 +148,4 @@ struct SVG_accelerator_t : SVG::accelerator_t {
} /* namespace OT */
#endif /* HB_OT_COLOR_SVG_TABLE_HH */
#endif /* OT_COLOR_SVG_SVG_HH */

View File

@@ -0,0 +1,337 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGE_HH
#define OT_LAYOUT_COMMON_COVERAGE_HH
#include "../types.hh"
#include "CoverageFormat1.hh"
#include "CoverageFormat2.hh"
namespace OT {
namespace Layout {
namespace Common {
template<typename Iterator>
static inline void Coverage_serialize (hb_serialize_context_t *c,
Iterator it);
struct Coverage
{
protected:
union {
HBUINT16 format; /* Format identifier */
CoverageFormat1_3<SmallTypes> format1;
CoverageFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
CoverageFormat1_3<MediumTypes>format3;
CoverageFormat2_4<MediumTypes>format4;
#endif
} u;
public:
DEFINE_SIZE_UNION (2, format);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
switch (u.format)
{
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.sanitize (c));
case 4: return_trace (u.format4.sanitize (c));
#endif
default:return_trace (true);
}
}
/* Has interface. */
unsigned operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != NOT_COVERED; }
/* Predicate. */
bool operator () (hb_codepoint_t k) const { return has (k); }
unsigned int get (hb_codepoint_t k) const { return get_coverage (k); }
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
switch (u.format) {
case 1: return u.format1.get_coverage (glyph_id);
case 2: return u.format2.get_coverage (glyph_id);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_coverage (glyph_id);
case 4: return u.format4.get_coverage (glyph_id);
#endif
default:return NOT_COVERED;
}
}
unsigned get_population () const
{
switch (u.format) {
case 1: return u.format1.get_population ();
case 2: return u.format2.get_population ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_population ();
case 4: return u.format4.get_population ();
#endif
default:return NOT_COVERED;
}
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned count = 0;
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
num_ranges++;
last = g;
count++;
}
u.format = count <= num_ranges * 3 ? 1 : 2;
#ifndef HB_NO_BEYOND_64K
if (count && last > 0xFFFFu)
u.format += 2;
#endif
switch (u.format)
{
case 1: return_trace (u.format1.serialize (c, glyphs));
case 2: return_trace (u.format2.serialize (c, glyphs));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.serialize (c, glyphs));
case 4: return_trace (u.format4.serialize (c, glyphs));
#endif
default:return_trace (false);
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto it =
+ iter ()
| hb_take (c->plan->source->get_num_glyphs ())
| hb_filter (c->plan->glyph_map_gsub)
| hb_map_retains_sorting (c->plan->glyph_map_gsub)
;
// Cache the iterator result as it will be iterated multiple times
// by the serialize code below.
hb_sorted_vector_t<hb_codepoint_t> glyphs (it);
Coverage_serialize (c->serializer, glyphs.iter ());
return_trace (bool (glyphs));
}
bool intersects (const hb_set_t *glyphs) const
{
switch (u.format)
{
case 1: return u.format1.intersects (glyphs);
case 2: return u.format2.intersects (glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersects (glyphs);
case 4: return u.format4.intersects (glyphs);
#endif
default:return false;
}
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{
switch (u.format)
{
case 1: return u.format1.intersects_coverage (glyphs, index);
case 2: return u.format2.intersects_coverage (glyphs, index);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersects_coverage (glyphs, index);
case 4: return u.format4.intersects_coverage (glyphs, index);
#endif
default:return false;
}
}
/* Might return false if array looks unsorted.
* Used for faster rejection of corrupt data. */
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{
switch (u.format)
{
case 1: return u.format1.collect_coverage (glyphs);
case 2: return u.format2.collect_coverage (glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.collect_coverage (glyphs);
case 4: return u.format4.collect_coverage (glyphs);
#endif
default:return false;
}
}
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
switch (u.format)
{
case 1: return u.format1.intersect_set (glyphs, intersect_glyphs);
case 2: return u.format2.intersect_set (glyphs, intersect_glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersect_set (glyphs, intersect_glyphs);
case 4: return u.format4.intersect_set (glyphs, intersect_glyphs);
#endif
default:return ;
}
}
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{
static constexpr bool is_sorted_iterator = true;
iter_t (const Coverage &c_ = Null (Coverage))
{
hb_memset (this, 0, sizeof (*this));
format = c_.u.format;
switch (format)
{
case 1: u.format1.init (c_.u.format1); return;
case 2: u.format2.init (c_.u.format2); return;
#ifndef HB_NO_BEYOND_64K
case 3: u.format3.init (c_.u.format3); return;
case 4: u.format4.init (c_.u.format4); return;
#endif
default: return;
}
}
bool __more__ () const
{
switch (format)
{
case 1: return u.format1.__more__ ();
case 2: return u.format2.__more__ ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.__more__ ();
case 4: return u.format4.__more__ ();
#endif
default:return false;
}
}
void __next__ ()
{
switch (format)
{
case 1: u.format1.__next__ (); break;
case 2: u.format2.__next__ (); break;
#ifndef HB_NO_BEYOND_64K
case 3: u.format3.__next__ (); break;
case 4: u.format4.__next__ (); break;
#endif
default: break;
}
}
typedef hb_codepoint_t __item_t__;
__item_t__ __item__ () const { return get_glyph (); }
hb_codepoint_t get_glyph () const
{
switch (format)
{
case 1: return u.format1.get_glyph ();
case 2: return u.format2.get_glyph ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_glyph ();
case 4: return u.format4.get_glyph ();
#endif
default:return 0;
}
}
bool operator != (const iter_t& o) const
{
if (unlikely (format != o.format)) return true;
switch (format)
{
case 1: return u.format1 != o.u.format1;
case 2: return u.format2 != o.u.format2;
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3 != o.u.format3;
case 4: return u.format4 != o.u.format4;
#endif
default:return false;
}
}
iter_t __end__ () const
{
iter_t it = {};
it.format = format;
switch (format)
{
case 1: it.u.format1 = u.format1.__end__ (); break;
case 2: it.u.format2 = u.format2.__end__ (); break;
#ifndef HB_NO_BEYOND_64K
case 3: it.u.format3 = u.format3.__end__ (); break;
case 4: it.u.format4 = u.format4.__end__ (); break;
#endif
default: break;
}
return it;
}
private:
unsigned int format;
union {
#ifndef HB_NO_BEYOND_64K
CoverageFormat2_4<MediumTypes>::iter_t format4; /* Put this one first since it's larger; helps shut up compiler. */
CoverageFormat1_3<MediumTypes>::iter_t format3;
#endif
CoverageFormat2_4<SmallTypes>::iter_t format2; /* Put this one first since it's larger; helps shut up compiler. */
CoverageFormat1_3<SmallTypes>::iter_t format1;
} u;
};
iter_t iter () const { return iter_t (*this); }
};
template<typename Iterator>
static inline void
Coverage_serialize (hb_serialize_context_t *c,
Iterator it)
{ c->start_embed<Coverage> ()->serialize (c, it); }
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGE_HH
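
A note on the format choice in `serialize` above: it is a direct size comparison. With `SmallTypes`, format 1 spends one 2-byte glyph ID per covered glyph, while format 2 spends one 6-byte `RangeRecord` (first, last, value) per contiguous range, each on top of a 4-byte header; that is exactly the `count <= num_ranges * 3` test, and the `HB_NO_BEYOND_64K` branch bumps the format by 2 to the `MediumTypes` variants when any glyph ID exceeds 0xFFFF. A worked example:

```
format 1 size = 4 + 2 * count           (one HBGlyphID per glyph)
format 2 size = 4 + 6 * num_ranges      (first, last, value per range)
format 1 wins when 2 * count <= 6 * num_ranges, i.e. count <= 3 * num_ranges

glyphs {5,6,7,8,20,21}: count = 6, num_ranges = 2, and 6 <= 3*2,
so format 1 is chosen (both encodings happen to be 16 bytes here;
ties go to format 1).
```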

View File

@@ -0,0 +1,133 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
#define OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
namespace OT {
namespace Layout {
namespace Common {
#define NOT_COVERED ((unsigned int) -1)
template <typename Types>
struct CoverageFormat1_3
{
friend struct Coverage;
protected:
HBUINT16 coverageFormat; /* Format identifier--format = 1 */
SortedArray16Of<typename Types::HBGlyphID>
glyphArray; /* Array of GlyphIDs--in numerical order */
public:
DEFINE_SIZE_ARRAY (4, glyphArray);
private:
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (glyphArray.sanitize (c));
}
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
unsigned int i;
glyphArray.bfind (glyph_id, &i, HB_NOT_FOUND_STORE, NOT_COVERED);
return i;
}
unsigned get_population () const
{
return glyphArray.len;
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
return_trace (glyphArray.serialize (c, glyphs));
}
bool intersects (const hb_set_t *glyphs) const
{
if (glyphArray.len > glyphs->get_population () * hb_bit_storage ((unsigned) glyphArray.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
}
for (const auto& g : glyphArray.as_array ())
if (glyphs->has (g))
return true;
return false;
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{ return glyphs->has (glyphArray[index]); }
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
unsigned count = glyphArray.len;
for (unsigned i = 0; i < count; i++)
if (glyphs.has (glyphArray[i]))
intersect_glyphs << glyphArray[i];
}
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{ return glyphs->add_sorted_array (glyphArray.as_array ()); }
public:
/* Older compilers need this to be public. */
struct iter_t
{
void init (const struct CoverageFormat1_3 &c_) { c = &c_; i = 0; }
bool __more__ () const { return i < c->glyphArray.len; }
void __next__ () { i++; }
hb_codepoint_t get_glyph () const { return c->glyphArray[i]; }
bool operator != (const iter_t& o) const
{ return i != o.i; }
iter_t __end__ () const { iter_t it; it.init (*c); it.i = c->glyphArray.len; return it; }
private:
const struct CoverageFormat1_3 *c;
unsigned int i;
};
private:
};
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
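
The branch at the top of `intersects` above is a cost heuristic, and the same test reappears in `CoverageFormat2_4::intersects`: scanning `glyphArray` costs one O(1) set-membership test per array element, while walking the set costs one binary search (`get_coverage` uses `bfind`, roughly log2(len) steps, which is what `hb_bit_storage (len)` approximates) per set element:

```
scan the array:  ~ len                  hb_set_t lookups
walk the set:    ~ pop * log2(len)      binary searches into glyphArray

the code walks the set when len > pop * hb_bit_storage (len) / 2,
i.e. when the full array scan would clearly be the costlier side.
```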

View File

@@ -0,0 +1,232 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
#define OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
#include "RangeRecord.hh"
namespace OT {
namespace Layout {
namespace Common {
template <typename Types>
struct CoverageFormat2_4
{
friend struct Coverage;
protected:
HBUINT16 coverageFormat; /* Format identifier--format = 2 */
SortedArray16Of<RangeRecord<Types>>
rangeRecord; /* Array of glyph ranges--ordered by
* Start GlyphID. rangeCount entries
* long */
public:
DEFINE_SIZE_ARRAY (4, rangeRecord);
private:
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (rangeRecord.sanitize (c));
}
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
const RangeRecord<Types> &range = rangeRecord.bsearch (glyph_id);
return likely (range.first <= range.last)
? (unsigned int) range.value + (glyph_id - range.first)
: NOT_COVERED;
}
unsigned get_population () const
{
typename Types::large_int ret = 0;
for (const auto &r : rangeRecord)
ret += r.get_population ();
return ret > UINT_MAX ? UINT_MAX : (unsigned) ret;
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
num_ranges++;
last = g;
}
if (unlikely (!rangeRecord.serialize (c, num_ranges))) return_trace (false);
if (!num_ranges) return_trace (true);
unsigned count = 0;
unsigned range = (unsigned) -1;
last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
{
range++;
rangeRecord[range].first = g;
rangeRecord[range].value = count;
}
rangeRecord[range].last = g;
last = g;
count++;
}
return_trace (true);
}
bool intersects (const hb_set_t *glyphs) const
{
if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
}
return hb_any (+ hb_iter (rangeRecord)
| hb_map ([glyphs] (const RangeRecord<Types> &range) { return range.intersects (*glyphs); }));
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{
auto *range = rangeRecord.as_array ().bsearch (index);
if (range)
return range->intersects (*glyphs);
return false;
}
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
/* Break out of loop for overlapping, broken, tables,
* to avoid fuzzer timouts. */
hb_codepoint_t last = 0;
for (const auto& range : rangeRecord)
{
if (unlikely (range.first < last))
break;
last = range.last;
for (hb_codepoint_t g = range.first - 1;
glyphs.next (&g) && g <= last;)
intersect_glyphs << g;
}
}
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{
for (const auto& range: rangeRecord)
if (unlikely (!range.collect_coverage (glyphs)))
return false;
return true;
}
public:
/* Older compilers need this to be public. */
struct iter_t
{
void init (const CoverageFormat2_4 &c_)
{
c = &c_;
coverage = 0;
i = 0;
j = c->rangeRecord.len ? c->rangeRecord[0].first : 0;
if (unlikely (c->rangeRecord[0].first > c->rangeRecord[0].last))
{
/* Broken table. Skip. */
i = c->rangeRecord.len;
j = 0;
}
}
bool __more__ () const { return i < c->rangeRecord.len; }
void __next__ ()
{
if (j >= c->rangeRecord[i].last)
{
i++;
if (__more__ ())
{
unsigned int old = coverage;
j = c->rangeRecord[i].first;
coverage = c->rangeRecord[i].value;
if (unlikely (coverage != old + 1))
{
/* Broken table. Skip. Important to avoid DoS.
* Also, our callers depend on coverage being
* consecutive and monotonically increasing,
* ie. iota(). */
i = c->rangeRecord.len;
j = 0;
return;
}
}
else
j = 0;
return;
}
coverage++;
j++;
}
hb_codepoint_t get_glyph () const { return j; }
bool operator != (const iter_t& o) const
{ return i != o.i || j != o.j; }
iter_t __end__ () const
{
iter_t it;
it.init (*c);
it.i = c->rangeRecord.len;
it.j = 0;
return it;
}
private:
const struct CoverageFormat2_4 *c;
unsigned int i, coverage;
hb_codepoint_t j;
};
private:
};
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
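
In `get_coverage` above, `range.value` stores the coverage index of the range's first glyph, so indices run consecutively across ranges; the iterator's `coverage != old + 1` check enforces exactly this invariant on broken tables. A hypothetical, self-contained illustration of the mapping (not HarfBuzz code):

```cpp
// Standalone illustration of the CoverageFormat2 index mapping: each range
// stores in `value` the coverage index of its first glyph, and coverage
// indices run consecutively through the range.
#include <cstdio>

struct Range { unsigned first, last, value; };

static unsigned coverage_index (const Range *r, unsigned n, unsigned glyph)
{
  for (unsigned i = 0; i < n; i++)        // the real table uses bsearch
    if (r[i].first <= glyph && glyph <= r[i].last)
      return r[i].value + (glyph - r[i].first);
  return (unsigned) -1;                   // NOT_COVERED
}

int main ()
{
  Range ranges[] = { {10, 19, 0}, {30, 34, 10} }; // 10 then 5 covered glyphs
  printf ("%u\n", coverage_index (ranges, 2, 32)); // prints 12 = 10 + (32-30)
  return 0;
}
```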

View File

@@ -0,0 +1,85 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_RANGERECORD_HH
#define OT_LAYOUT_COMMON_RANGERECORD_HH
namespace OT {
namespace Layout {
namespace Common {
template <typename Types>
struct RangeRecord
{
typename Types::HBGlyphID first; /* First GlyphID in the range */
typename Types::HBGlyphID last; /* Last GlyphID in the range */
HBUINT16 value; /* Value */
DEFINE_SIZE_STATIC (2 + 2 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
int cmp (hb_codepoint_t g) const
{ return g < first ? -1 : g <= last ? 0 : +1; }
unsigned get_population () const
{
if (unlikely (last < first)) return 0;
return (last - first + 1);
}
bool intersects (const hb_set_t &glyphs) const
{ return glyphs.intersects (first, last); }
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{ return glyphs->add_range (first, last); }
};
}
}
}
// TODO(garretrieger): This was previously implemented using
// DECLARE_NULL_NAMESPACE_BYTES_TEMPLATE1 (OT, RangeRecord, 9);
// but that only works when there is only a single namespace level.
// The macro should probably be fixed so it can work in this situation.
extern HB_INTERNAL const unsigned char _hb_Null_OT_RangeRecord[9];
template <typename Spec>
struct Null<OT::Layout::Common::RangeRecord<Spec>> {
static OT::Layout::Common::RangeRecord<Spec> const & get_null () {
return *reinterpret_cast<const OT::Layout::Common::RangeRecord<Spec> *> (_hb_Null_OT_RangeRecord);
}
};
#endif // #ifndef OT_LAYOUT_COMMON_RANGERECORD_HH

View File

@@ -0,0 +1,918 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2011,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod
*/
#ifndef OT_LAYOUT_GDEF_GDEF_HH
#define OT_LAYOUT_GDEF_GDEF_HH
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-font.hh"
namespace OT {
/*
* Attachment List Table
*/
/* Array of contour point indices--in increasing numerical order */
struct AttachPoint : Array16Of<HBUINT16>
{
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
return_trace (out->serialize (c->serializer, + iter ()));
}
};
struct AttachList
{
unsigned int get_attach_points (hb_codepoint_t glyph_id,
unsigned int start_offset,
unsigned int *point_count /* IN/OUT */,
unsigned int *point_array /* OUT */) const
{
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (index == NOT_COVERED)
{
if (point_count)
*point_count = 0;
return 0;
}
const AttachPoint &points = this+attachPoint[index];
if (point_count)
{
+ points.as_array ().sub_array (start_offset, point_count)
| hb_sink (hb_array (point_array, *point_count))
;
}
return points.len;
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, attachPoint)
| hb_filter (glyphset, hb_first)
| hb_filter (subset_offset_array (c, out->attachPoint, this), hb_second)
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
out->coverage.serialize_serialize (c->serializer, new_coverage.iter ());
return_trace (bool (new_coverage));
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && attachPoint.sanitize (c, this));
}
protected:
Offset16To<Coverage>
coverage; /* Offset to Coverage table -- from
* beginning of AttachList table */
Array16OfOffset16To<AttachPoint>
attachPoint; /* Array of AttachPoint tables
* in Coverage Index order */
public:
DEFINE_SIZE_ARRAY (4, attachPoint);
};
/*
* Ligature Caret Table
*/
struct CaretValueFormat1
{
friend struct CaretValue;
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
return_trace (true);
}
private:
hb_position_t get_caret_value (hb_font_t *font, hb_direction_t direction) const
{
return HB_DIRECTION_IS_HORIZONTAL (direction) ? font->em_scale_x (coordinate) : font->em_scale_y (coordinate);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
HBUINT16 caretValueFormat; /* Format identifier--format = 1 */
FWORD coordinate; /* X or Y value, in design units */
public:
DEFINE_SIZE_STATIC (4);
};
struct CaretValueFormat2
{
friend struct CaretValue;
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
return_trace (true);
}
private:
hb_position_t get_caret_value (hb_font_t *font, hb_direction_t direction, hb_codepoint_t glyph_id) const
{
hb_position_t x, y;
font->get_glyph_contour_point_for_origin (glyph_id, caretValuePoint, direction, &x, &y);
return HB_DIRECTION_IS_HORIZONTAL (direction) ? x : y;
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
HBUINT16 caretValueFormat; /* Format identifier--format = 2 */
HBUINT16 caretValuePoint; /* Contour point index on glyph */
public:
DEFINE_SIZE_STATIC (4);
};
struct CaretValueFormat3
{
friend struct CaretValue;
hb_position_t get_caret_value (hb_font_t *font, hb_direction_t direction,
const VariationStore &var_store) const
{
return HB_DIRECTION_IS_HORIZONTAL (direction) ?
font->em_scale_x (coordinate) + (this+deviceTable).get_x_delta (font, var_store) :
font->em_scale_y (coordinate) + (this+deviceTable).get_y_delta (font, var_store);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (!c->serializer->embed (caretValueFormat)) return_trace (false);
if (!c->serializer->embed (coordinate)) return_trace (false);
unsigned varidx = (this+deviceTable).get_variation_index ();
if (c->plan->layout_variation_idx_delta_map.has (varidx))
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (varidx));
if (delta != 0)
{
if (!c->serializer->check_assign (out->coordinate, coordinate + delta, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
}
if (c->plan->all_axes_pinned)
return_trace (c->serializer->check_assign (out->caretValueFormat, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (deviceTable))
return_trace (false);
return_trace (out->deviceTable.serialize_copy (c->serializer, deviceTable, this, c->serializer->to_bias (out),
hb_serialize_context_t::Head, &c->plan->layout_variation_idx_delta_map));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{ (this+deviceTable).collect_variation_indices (c); }
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && deviceTable.sanitize (c, this));
}
protected:
HBUINT16 caretValueFormat; /* Format identifier--format = 3 */
FWORD coordinate; /* X or Y value, in design units */
Offset16To<Device>
deviceTable; /* Offset to Device table for X or Y
* value--from beginning of CaretValue
* table */
public:
DEFINE_SIZE_STATIC (6);
};
struct CaretValue
{
hb_position_t get_caret_value (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t glyph_id,
const VariationStore &var_store) const
{
switch (u.format) {
case 1: return u.format1.get_caret_value (font, direction);
case 2: return u.format2.get_caret_value (font, direction, glyph_id);
case 3: return u.format3.get_caret_value (font, direction, var_store);
default:return 0;
}
}
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
switch (u.format) {
case 1:
case 2:
return;
case 3:
u.format3.collect_variation_indices (c);
return;
default: return;
}
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
switch (u.format) {
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
case 3: return_trace (u.format3.sanitize (c));
default:return_trace (true);
}
}
protected:
union {
HBUINT16 format; /* Format identifier */
CaretValueFormat1 format1;
CaretValueFormat2 format2;
CaretValueFormat3 format3;
} u;
public:
DEFINE_SIZE_UNION (2, format);
};
struct LigGlyph
{
unsigned get_lig_carets (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t glyph_id,
const VariationStore &var_store,
unsigned start_offset,
unsigned *caret_count /* IN/OUT */,
hb_position_t *caret_array /* OUT */) const
{
if (caret_count)
{
+ carets.as_array ().sub_array (start_offset, caret_count)
| hb_map (hb_add (this))
| hb_map ([&] (const CaretValue &value) { return value.get_caret_value (font, direction, glyph_id, var_store); })
| hb_sink (hb_array (caret_array, *caret_count))
;
}
return carets.len;
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
+ hb_iter (carets)
| hb_apply (subset_offset_array (c, out->carets, this))
;
return_trace (bool (out->carets));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
for (const Offset16To<CaretValue>& offset : carets.iter ())
(this+offset).collect_variation_indices (c);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (carets.sanitize (c, this));
}
protected:
Array16OfOffset16To<CaretValue>
carets; /* Offset array of CaretValue tables
* --from beginning of LigGlyph table
* --in increasing coordinate order */
public:
DEFINE_SIZE_ARRAY (2, carets);
};
struct LigCaretList
{
unsigned int get_lig_carets (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t glyph_id,
const VariationStore &var_store,
unsigned int start_offset,
unsigned int *caret_count /* IN/OUT */,
hb_position_t *caret_array /* OUT */) const
{
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (index == NOT_COVERED)
{
if (caret_count)
*caret_count = 0;
return 0;
}
const LigGlyph &lig_glyph = this+ligGlyph[index];
return lig_glyph.get_lig_carets (font, direction, glyph_id, var_store, start_offset, caret_count, caret_array);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, ligGlyph)
| hb_filter (glyphset, hb_first)
| hb_filter (subset_offset_array (c, out->ligGlyph, this), hb_second)
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
out->coverage.serialize_serialize (c->serializer, new_coverage.iter ());
return_trace (bool (new_coverage));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
+ hb_zip (this+coverage, ligGlyph)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const LigGlyph& _) { _.collect_variation_indices (c); })
;
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && ligGlyph.sanitize (c, this));
}
protected:
Offset16To<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of LigCaretList table */
Array16OfOffset16To<LigGlyph>
ligGlyph; /* Array of LigGlyph tables
* in Coverage Index order */
public:
DEFINE_SIZE_ARRAY (4, ligGlyph);
};
struct MarkGlyphSetsFormat1
{
bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{ return (this+coverage[set_index]).get_coverage (glyph_id) != NOT_COVERED; }
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
bool ret = true;
for (const Offset32To<Coverage>& offset : coverage.iter ())
{
auto *o = out->coverage.serialize_append (c->serializer);
if (unlikely (!o))
{
ret = false;
break;
}
//not using o->serialize_subset (c, offset, this, out) here because
//OTS doesn't allow null offset.
//See issue: https://github.com/khaledhosny/ots/issues/172
c->serializer->push ();
c->dispatch (this+offset);
c->serializer->add_link (*o, c->serializer->pop_pack ());
}
return_trace (ret && out->coverage.len);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this));
}
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Array16Of<Offset32To<Coverage>>
coverage; /* Array of long offsets to mark set
* coverage tables */
public:
DEFINE_SIZE_ARRAY (4, coverage);
};
struct MarkGlyphSets
{
bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{
switch (u.format) {
case 1: return u.format1.covers (set_index, glyph_id);
default:return false;
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
switch (u.format) {
case 1: return_trace (u.format1.subset (c));
default:return_trace (false);
}
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
switch (u.format) {
case 1: return_trace (u.format1.sanitize (c));
default:return_trace (true);
}
}
protected:
union {
HBUINT16 format; /* Format identifier */
MarkGlyphSetsFormat1 format1;
} u;
public:
DEFINE_SIZE_UNION (2, format);
};
/*
* GDEF -- Glyph Definition
* https://docs.microsoft.com/en-us/typography/opentype/spec/gdef
*/
template <typename Types>
struct GDEFVersion1_2
{
friend struct GDEF;
protected:
FixedVersion<>version; /* Version of the GDEF table--currently
* 0x00010003u */
typename Types::template OffsetTo<ClassDef>
glyphClassDef; /* Offset to class definition table
* for glyph type--from beginning of
* GDEF header (may be Null) */
typename Types::template OffsetTo<AttachList>
attachList; /* Offset to list of glyphs with
* attachment points--from beginning
* of GDEF header (may be Null) */
typename Types::template OffsetTo<LigCaretList>
ligCaretList; /* Offset to list of positioning points
* for ligature carets--from beginning
* of GDEF header (may be Null) */
typename Types::template OffsetTo<ClassDef>
markAttachClassDef; /* Offset to class definition table for
* mark attachment type--from beginning
* of GDEF header (may be Null) */
typename Types::template OffsetTo<MarkGlyphSets>
markGlyphSetsDef; /* Offset to the table of mark set
* definitions--from beginning of GDEF
* header (may be NULL). Introduced
* in version 0x00010002. */
Offset32To<VariationStore>
varStore; /* Offset to the table of Item Variation
* Store--from beginning of GDEF
* header (may be NULL). Introduced
* in version 0x00010003. */
public:
DEFINE_SIZE_MIN (4 + 4 * Types::size);
unsigned int get_size () const
{
return min_size +
(version.to_int () >= 0x00010002u ? markGlyphSetsDef.static_size : 0) +
(version.to_int () >= 0x00010003u ? varStore.static_size : 0);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (version.sanitize (c) &&
glyphClassDef.sanitize (c, this) &&
attachList.sanitize (c, this) &&
ligCaretList.sanitize (c, this) &&
markAttachClassDef.sanitize (c, this) &&
(version.to_int () < 0x00010002u || markGlyphSetsDef.sanitize (c, this)) &&
(version.to_int () < 0x00010003u || varStore.sanitize (c, this)));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false);
bool subset_glyphclassdef = out->glyphClassDef.serialize_subset (c, glyphClassDef, this, nullptr, false, true);
bool subset_attachlist = out->attachList.serialize_subset (c, attachList, this);
bool subset_ligcaretlist = out->ligCaretList.serialize_subset (c, ligCaretList, this);
bool subset_markattachclassdef = out->markAttachClassDef.serialize_subset (c, markAttachClassDef, this, nullptr, false, true);
bool subset_markglyphsetsdef = false;
if (version.to_int () >= 0x00010002u)
{
subset_markglyphsetsdef = out->markGlyphSetsDef.serialize_subset (c, markGlyphSetsDef, this);
}
bool subset_varstore = false;
if (version.to_int () >= 0x00010003u)
{
if (c->plan->all_axes_pinned)
out->varStore = 0;
else
subset_varstore = out->varStore.serialize_subset (c, varStore, this, c->plan->gdef_varstore_inner_maps.as_array ());
}
if (subset_varstore)
{
out->version.minor = 3;
} else if (subset_markglyphsetsdef) {
out->version.minor = 2;
} else {
out->version.minor = 0;
}
return_trace (subset_glyphclassdef || subset_attachlist ||
subset_ligcaretlist || subset_markattachclassdef ||
(out->version.to_int () >= 0x00010002u && subset_markglyphsetsdef) ||
(out->version.to_int () >= 0x00010003u && subset_varstore));
}
};
struct GDEF
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_GDEF;
enum GlyphClasses {
UnclassifiedGlyph = 0,
BaseGlyph = 1,
LigatureGlyph = 2,
MarkGlyph = 3,
ComponentGlyph = 4
};
unsigned int get_size () const
{
switch (u.version.major) {
case 1: return u.version1.get_size ();
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.get_size ();
#endif
default: return u.version.static_size;
}
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!u.version.sanitize (c))) return_trace (false);
switch (u.version.major) {
case 1: return_trace (u.version1.sanitize (c));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (u.version2.sanitize (c));
#endif
default: return_trace (true);
}
}
bool subset (hb_subset_context_t *c) const
{
switch (u.version.major) {
case 1: return u.version1.subset (c);
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.subset (c);
#endif
default: return false;
}
}
bool has_glyph_classes () const
{
switch (u.version.major) {
case 1: return u.version1.glyphClassDef != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.glyphClassDef != 0;
#endif
default: return false;
}
}
const ClassDef &get_glyph_class_def () const
{
switch (u.version.major) {
case 1: return this+u.version1.glyphClassDef;
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.glyphClassDef;
#endif
default: return Null(ClassDef);
}
}
bool has_attach_list () const
{
switch (u.version.major) {
case 1: return u.version1.attachList != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.attachList != 0;
#endif
default: return false;
}
}
const AttachList &get_attach_list () const
{
switch (u.version.major) {
case 1: return this+u.version1.attachList;
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.attachList;
#endif
default: return Null(AttachList);
}
}
bool has_lig_carets () const
{
switch (u.version.major) {
case 1: return u.version1.ligCaretList != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.ligCaretList != 0;
#endif
default: return false;
}
}
const LigCaretList &get_lig_caret_list () const
{
switch (u.version.major) {
case 1: return this+u.version1.ligCaretList;
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.ligCaretList;
#endif
default: return Null(LigCaretList);
}
}
bool has_mark_attachment_types () const
{
switch (u.version.major) {
case 1: return u.version1.markAttachClassDef != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.markAttachClassDef != 0;
#endif
default: return false;
}
}
const ClassDef &get_mark_attach_class_def () const
{
switch (u.version.major) {
case 1: return this+u.version1.markAttachClassDef;
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.markAttachClassDef;
#endif
default: return Null(ClassDef);
}
}
bool has_mark_glyph_sets () const
{
switch (u.version.major) {
case 1: return u.version.to_int () >= 0x00010002u && u.version1.markGlyphSetsDef != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.markGlyphSetsDef != 0;
#endif
default: return false;
}
}
const MarkGlyphSets &get_mark_glyph_sets () const
{
switch (u.version.major) {
case 1: return u.version.to_int () >= 0x00010002u ? this+u.version1.markGlyphSetsDef : Null(MarkGlyphSets);
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.markGlyphSetsDef;
#endif
default: return Null(MarkGlyphSets);
}
}
bool has_var_store () const
{
switch (u.version.major) {
case 1: return u.version.to_int () >= 0x00010003u && u.version1.varStore != 0;
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.varStore != 0;
#endif
default: return false;
}
}
const VariationStore &get_var_store () const
{
switch (u.version.major) {
case 1: return u.version.to_int () >= 0x00010003u ? this+u.version1.varStore : Null(VariationStore);
#ifndef HB_NO_BEYOND_64K
case 2: return this+u.version2.varStore;
#endif
default: return Null(VariationStore);
}
}
bool has_data () const { return u.version.to_int (); }
unsigned int get_glyph_class (hb_codepoint_t glyph) const
{ return get_glyph_class_def ().get_class (glyph); }
void get_glyphs_in_class (unsigned int klass, hb_set_t *glyphs) const
{ get_glyph_class_def ().collect_class (glyphs, klass); }
unsigned int get_mark_attachment_type (hb_codepoint_t glyph) const
{ return get_mark_attach_class_def ().get_class (glyph); }
unsigned int get_attach_points (hb_codepoint_t glyph_id,
unsigned int start_offset,
unsigned int *point_count /* IN/OUT */,
unsigned int *point_array /* OUT */) const
{ return get_attach_list ().get_attach_points (glyph_id, start_offset, point_count, point_array); }
unsigned int get_lig_carets (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t glyph_id,
unsigned int start_offset,
unsigned int *caret_count /* IN/OUT */,
hb_position_t *caret_array /* OUT */) const
{ return get_lig_caret_list ().get_lig_carets (font,
direction, glyph_id, get_var_store(),
start_offset, caret_count, caret_array); }
bool mark_set_covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{ return get_mark_glyph_sets ().covers (set_index, glyph_id); }
/* glyph_props is a 16-bit integer where the lower 8-bit have bits representing
* glyph class and other bits, and high 8-bit the mark attachment type (if any).
* Not to be confused with lookup_props which is very similar. */
unsigned int get_glyph_props (hb_codepoint_t glyph) const
{
unsigned int klass = get_glyph_class (glyph);
static_assert (((unsigned int) HB_OT_LAYOUT_GLYPH_PROPS_BASE_GLYPH == (unsigned int) LookupFlag::IgnoreBaseGlyphs), "");
static_assert (((unsigned int) HB_OT_LAYOUT_GLYPH_PROPS_LIGATURE == (unsigned int) LookupFlag::IgnoreLigatures), "");
static_assert (((unsigned int) HB_OT_LAYOUT_GLYPH_PROPS_MARK == (unsigned int) LookupFlag::IgnoreMarks), "");
switch (klass) {
default: return HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED;
case BaseGlyph: return HB_OT_LAYOUT_GLYPH_PROPS_BASE_GLYPH;
case LigatureGlyph: return HB_OT_LAYOUT_GLYPH_PROPS_LIGATURE;
case MarkGlyph:
klass = get_mark_attachment_type (glyph);
return HB_OT_LAYOUT_GLYPH_PROPS_MARK | (klass << 8);
}
}
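
A standalone sketch of the bit packing described in the comment above. The constants are illustrative stand-ins, not the HarfBuzz enums, though the static_asserts above pin the real values to the LookupFlag::Ignore* bits (0x02/0x04/0x08):

```
#include <cstdio>

/* Illustrative stand-ins for HB_OT_LAYOUT_GLYPH_PROPS_*. */
enum { PROPS_BASE_GLYPH = 0x02u, PROPS_LIGATURE = 0x04u, PROPS_MARK = 0x08u };

int main ()
{
  unsigned attach_class = 3;
  unsigned props = PROPS_MARK | (attach_class << 8);  /* as in the MarkGlyph case */
  printf ("props=0x%04x class-bits=0x%02x attach-type=%u\n",
          props, props & 0xFFu, props >> 8);          /* 0x0308 0x08 3 */
  return 0;
}
```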
HB_INTERNAL bool is_blocklisted (hb_blob_t *blob,
hb_face_t *face) const;
struct accelerator_t
{
accelerator_t (hb_face_t *face)
{
table = hb_sanitize_context_t ().reference_table<GDEF> (face);
if (unlikely (table->is_blocklisted (table.get_blob (), face)))
{
hb_blob_destroy (table.get_blob ());
table = hb_blob_get_empty ();
}
}
~accelerator_t () { table.destroy (); }
hb_blob_ptr_t<GDEF> table;
};
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{ get_lig_caret_list ().collect_variation_indices (c); }
void remap_layout_variation_indices (const hb_set_t *layout_variation_indices,
hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map /* OUT */) const
{
if (!has_var_store ()) return;
if (layout_variation_indices->is_empty ()) return;
unsigned new_major = 0, new_minor = 0;
unsigned last_major = (layout_variation_indices->get_min ()) >> 16;
for (unsigned idx : layout_variation_indices->iter ())
{
uint16_t major = idx >> 16;
if (major >= get_var_store ().get_sub_table_count ()) break;
if (major != last_major)
{
new_minor = 0;
++new_major;
}
unsigned new_idx = (new_major << 16) + new_minor;
if (!layout_variation_idx_delta_map->has (idx))
continue;
int delta = hb_second (layout_variation_idx_delta_map->get (idx));
layout_variation_idx_delta_map->set (idx, hb_pair_t<unsigned, int> (new_idx, delta));
++new_minor;
last_major = major;
}
}
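
The loop above renumbers the surviving (major, minor) variation-index pairs densely, allocating one new major per distinct old major. A self-contained sketch of the same renumbering, with the hb_ containers replaced by std:: equivalents for illustration:

```
#include <cstdint>
#include <cstdio>
#include <map>
#include <set>

int main ()
{
  /* Variation indices pack a VariationStore (major, minor) pair as
   * (major << 16) | minor; renumber the survivors densely. */
  std::set<uint32_t> kept = { (0u << 16) + 5, (2u << 16) + 1, (2u << 16) + 7 };
  std::map<uint32_t, uint32_t> remap;

  unsigned new_major = 0, new_minor = 0;
  unsigned last_major = *kept.begin () >> 16;
  for (uint32_t idx : kept)
  {
    unsigned major = idx >> 16;
    if (major != last_major) { new_minor = 0; ++new_major; }
    remap[idx] = (new_major << 16) + new_minor;
    ++new_minor;
    last_major = major;
  }
  for (auto &p : remap)
    printf ("%u.%u -> %u.%u\n", p.first >> 16, p.first & 0xFFFFu,
            p.second >> 16, p.second & 0xFFFFu);
  /* prints: 0.5 -> 0.0   2.1 -> 1.0   2.7 -> 1.1 */
  return 0;
}
```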
protected:
union {
FixedVersion<> version; /* Version identifier */
GDEFVersion1_2<SmallTypes> version1;
#ifndef HB_NO_BEYOND_64K
GDEFVersion1_2<MediumTypes> version2;
#endif
} u;
public:
DEFINE_SIZE_MIN (4);
};
struct GDEF_accelerator_t : GDEF::accelerator_t {
GDEF_accelerator_t (hb_face_t *face) : GDEF::accelerator_t (face) {}
};
} /* namespace OT */
#endif /* OT_LAYOUT_GDEF_GDEF_HH */

View File

@ -58,8 +58,7 @@ struct Anchor
return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer))));
}
return_trace (bool (reinterpret_cast<Anchor *> (u.format2.copy (c->serializer))));
case 3: return_trace (bool (reinterpret_cast<Anchor *> (u.format3.copy (c->serializer,
c->plan->layout_variation_idx_map))));
case 3: return_trace (u.format3.subset (c));
default:return_trace (false);
}
}

View File

@ -41,24 +41,54 @@ struct AnchorFormat3
*y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache);
}
AnchorFormat3* copy (hb_serialize_context_t *c,
const hb_map_t *layout_variation_idx_map) const
bool subset (hb_subset_context_t *c) const
{
TRACE_SERIALIZE (this);
if (!layout_variation_idx_map) return_trace (nullptr);
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->embed (format))) return_trace (false);
if (unlikely (!c->serializer->embed (xCoordinate))) return_trace (false);
if (unlikely (!c->serializer->embed (yCoordinate))) return_trace (false);
auto *out = c->embed<AnchorFormat3> (this);
if (unlikely (!out)) return_trace (nullptr);
unsigned x_varidx = xDeviceTable ? (this+xDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (x_varidx))
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (x_varidx));
if (delta != 0)
{
if (!c->serializer->check_assign (out->xCoordinate, xCoordinate + delta,
HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
}
out->xDeviceTable.serialize_copy (c, xDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map);
out->yDeviceTable.serialize_copy (c, yDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map);
unsigned y_varidx = yDeviceTable ? (this+yDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (y_varidx))
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (y_varidx));
if (delta != 0)
{
if (!c->serializer->check_assign (out->yCoordinate, yCoordinate + delta,
HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
}
if (c->plan->all_axes_pinned)
return_trace (c->serializer->check_assign (out->format, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (xDeviceTable)) return_trace (false);
if (!c->serializer->embed (yDeviceTable)) return_trace (false);
out->xDeviceTable.serialize_copy (c->serializer, xDeviceTable, this, 0, hb_serialize_context_t::Head, &c->plan->layout_variation_idx_delta_map);
out->yDeviceTable.serialize_copy (c->serializer, yDeviceTable, this, 0, hb_serialize_context_t::Head, &c->plan->layout_variation_idx_delta_map);
return_trace (out);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
(this+xDeviceTable).collect_variation_indices (c->layout_variation_indices);
(this+yDeviceTable).collect_variation_indices (c->layout_variation_indices);
(this+xDeviceTable).collect_variation_indices (c);
(this+yDeviceTable).collect_variation_indices (c);
}
};
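
The net effect of the new AnchorFormat3::subset above: under an instancing plan, the delta recorded for each device table's variation index is folded into the static coordinate, and once every axis is pinned the anchor can be downgraded to format 1. A minimal sketch of that bake-in, with plain ints standing in for the serializer machinery:

```
#include <cstdio>

struct anchor_t { unsigned format; int x, y; };

/* Fold the deltas resolved at the pinned location into the coordinates;
 * with every axis pinned the device tables are dead weight, so the
 * anchor can become a plain format-1 record. */
static anchor_t bake (anchor_t a, int x_delta, int y_delta, bool all_axes_pinned)
{
  a.x += x_delta;
  a.y += y_delta;
  if (all_axes_pinned) a.format = 1;
  return a;
}

int main ()
{
  anchor_t a = bake ({ 3, 100, -20 }, 6, 0, true);
  printf ("format=%u x=%d y=%d\n", a.format, a.x, a.y);  /* format=1 x=106 y=-20 */
  return 0;
}
```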

View File

@ -22,7 +22,8 @@ template<typename Iterator, typename SrcLookup>
static void SinglePos_serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
const hb_map_t *layout_variation_idx_map);
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
bool all_axes_pinned);
}

View File

@ -19,8 +19,8 @@ struct CursivePos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());

View File

@ -140,7 +140,14 @@ struct CursivePosFormat1
unsigned int i = skippy_iter.idx;
unsigned int j = buffer->idx;
buffer->unsafe_to_break (i, j);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"cursive attaching glyph at %u to glyph at %u",
i, j);
}
buffer->unsafe_to_break (i, j + 1);
float entry_x, entry_y, exit_x, exit_y;
(this+prev_record.exitAnchor).get_anchor (c, buffer->info[i].codepoint, &exit_x, &exit_y);
(this+this_record.entryAnchor).get_anchor (c, buffer->info[j].codepoint, &entry_x, &entry_y);
@ -223,7 +230,20 @@ struct CursivePosFormat1
* https://github.com/harfbuzz/harfbuzz/issues/2469
*/
if (unlikely (pos[parent].attach_chain() == -pos[child].attach_chain()))
{
pos[parent].attach_chain() = 0;
if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction)))
pos[parent].y_offset = 0;
else
pos[parent].x_offset = 0;
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"cursive attached glyph at %u to glyph at %u",
i, j);
}
buffer->idx++;
return_trace (true);
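
A sketch of the two-glyph attachment-cycle break referenced by the issue link above (harfbuzz#2469): when parent and child each point at the other, their attach_chain values are equal and opposite, and the parent's cursive offset is neutralised (the y offset in horizontal text, x in vertical):

```
#include <cstdio>

int main ()
{
  int parent_chain = +1, child_chain = -1;   /* mutual attachment */
  int parent_y_offset = 50;
  if (parent_chain == -child_chain)
  {
    parent_chain = 0;
    parent_y_offset = 0;   /* horizontal text: cursive moves y only */
  }
  printf ("parent chain=%d y_offset=%d\n", parent_chain, parent_y_offset);
  return 0;
}
```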

View File

@ -1,12 +1,15 @@
#ifndef OT_LAYOUT_GPOS_HH
#define OT_LAYOUT_GPOS_HH
#ifndef OT_LAYOUT_GPOS_GPOS_HH
#define OT_LAYOUT_GPOS_GPOS_HH
#include "../../hb-ot-layout-common.hh"
#include "../../hb-ot-layout-gsubgpos.hh"
#include "GPOS/Common.hh"
#include "GPOS/PosLookup.hh"
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
#include "PosLookup.hh"
namespace OT {
using Layout::GPOS_impl::PosLookup;
namespace Layout {
static void
@ -25,10 +28,10 @@ struct GPOS : GSUBGPOS
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_GPOS;
using Lookup = GPOS_impl::PosLookup;
using Lookup = PosLookup;
const GPOS_impl::PosLookup& get_lookup (unsigned int i) const
{ return static_cast<const GPOS_impl::PosLookup &> (GSUBGPOS::get_lookup (i)); }
const PosLookup& get_lookup (unsigned int i) const
{ return static_cast<const PosLookup &> (GSUBGPOS::get_lookup (i)); }
static inline void position_start (hb_font_t *font, hb_buffer_t *buffer);
static inline void position_finish_advances (hb_font_t *font, hb_buffer_t *buffer);
@ -36,12 +39,15 @@ struct GPOS : GSUBGPOS
bool subset (hb_subset_context_t *c) const
{
hb_subset_layout_context_t l (c, tableTag, c->plan->gpos_lookups, c->plan->gpos_langsys, c->plan->gpos_features);
return GSUBGPOS::subset<GPOS_impl::PosLookup> (&l);
hb_subset_layout_context_t l (c, tableTag);
return GSUBGPOS::subset<PosLookup> (&l);
}
bool sanitize (hb_sanitize_context_t *c) const
{ return GSUBGPOS::sanitize<GPOS_impl::PosLookup> (c); }
{
TRACE_SANITIZE (this);
return_trace (GSUBGPOS::sanitize<PosLookup> (c));
}
HB_INTERNAL bool is_blocklisted (hb_blob_t *blob,
hb_face_t *face) const;
@ -51,7 +57,7 @@ struct GPOS : GSUBGPOS
for (unsigned i = 0; i < GSUBGPOS::get_lookup_count (); i++)
{
if (!c->gpos_lookups->has (i)) continue;
const GPOS_impl::PosLookup &l = get_lookup (i);
const PosLookup &l = get_lookup (i);
l.dispatch (c);
}
}
@ -59,7 +65,7 @@ struct GPOS : GSUBGPOS
void closure_lookups (hb_face_t *face,
const hb_set_t *glyphs,
hb_set_t *lookup_indexes /* IN/OUT */) const
{ GSUBGPOS::closure_lookups<GPOS_impl::PosLookup> (face, glyphs, lookup_indexes); }
{ GSUBGPOS::closure_lookups<PosLookup> (face, glyphs, lookup_indexes); }
typedef GSUBGPOS::accelerator_t<GPOS> accelerator_t;
};
@ -162,4 +168,4 @@ struct GPOS_accelerator_t : Layout::GPOS::accelerator_t {
}
#endif /* OT_LAYOUT_GPOS_HH */
#endif /* OT_LAYOUT_GPOS_GPOS_HH */

View File

@ -0,0 +1,56 @@
#ifndef OT_LAYOUT_GPOS_LIGATUREARRAY_HH
#define OT_LAYOUT_GPOS_LIGATUREARRAY_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef AnchorMatrix LigatureAttach; /* component-major--
* in order of writing direction--,
* mark-minor--
* ordered by class--zero-based. */
/* Array of LigatureAttach tables ordered by LigatureCoverage Index */
struct LigatureArray : List16OfOffset16To<LigatureAttach>
{
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool subset (hb_subset_context_t *c,
Iterator coverage,
unsigned class_count,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
{
auto *matrix = out->serialize_append (c->serializer);
if (unlikely (!matrix)) return_trace (false);
const LigatureAttach& src = (this + _.second);
auto indexes =
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
;
matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
}
return_trace (this->len);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_LIGATUREARRAY_HH */

View File

@ -39,6 +39,13 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
mark_anchor.get_anchor (c, buffer->cur().codepoint, &mark_x, &mark_y);
glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"attaching mark glyph at %u to glyph at %u",
c->buffer->idx, glyph_pos);
}
hb_glyph_position_t &o = buffer->cur_pos();
o.x_offset = roundf (base_x - mark_x);
o.y_offset = roundf (base_y - mark_y);
@ -46,6 +53,13 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
o.attach_chain() = (int) glyph_pos - (int) buffer->idx;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"attached mark glyph at %u to glyph at %u",
c->buffer->idx, glyph_pos);
}
buffer->idx++;
return_trace (true);
}
@ -83,10 +97,11 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
}
};
static void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage,
const MarkArray &mark_array,
const hb_set_t &glyphset,
hb_map_t* klass_mapping /* INOUT */)
HB_INTERNAL inline
void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage,
const MarkArray &mark_array,
const hb_set_t &glyphset,
hb_map_t* klass_mapping /* INOUT */)
{
hb_set_t orig_classes;

View File

@ -11,18 +11,24 @@ struct MarkBasePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkBasePosFormat1 format1;
HBUINT16 format; /* Format identifier */
MarkBasePosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkBasePosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}

View File

@ -12,26 +12,27 @@ typedef AnchorMatrix BaseArray; /* base-major--
* mark-minor--
* ordered by class--zero-based. */
struct MarkBasePosFormat1
template <typename Types>
struct MarkBasePosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
markCoverage; /* Offset to MarkCoverage table--from
* beginning of MarkBasePos subtable */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
baseCoverage; /* Offset to BaseCoverage table--from
* beginning of MarkBasePos subtable */
HBUINT16 classCount; /* Number of classes defined for marks */
Offset16To<MarkArray>
typename Types::template OffsetTo<MarkArray>
markArray; /* Offset to MarkArray table--from
* beginning of MarkBasePos subtable */
Offset16To<BaseArray>
typename Types::template OffsetTo<BaseArray>
baseArray; /* Offset to BaseArray table--from
* beginning of MarkBasePos subtable */
public:
DEFINE_SIZE_STATIC (12);
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -89,6 +90,25 @@ struct MarkBasePosFormat1
const Coverage &get_coverage () const { return this+markCoverage; }
static inline bool accept (hb_buffer_t *buffer, unsigned idx)
{
/* We only want to attach to the first of a MultipleSubst sequence.
* https://github.com/harfbuzz/harfbuzz/issues/740
* Reject others...
* ...but stop if we find a mark in the MultipleSubst sequence:
* https://github.com/harfbuzz/harfbuzz/issues/1020 */
return !_hb_glyph_info_multiplied (&buffer->info[idx]) ||
0 == _hb_glyph_info_get_lig_comp (&buffer->info[idx]) ||
(idx == 0 ||
_hb_glyph_info_is_mark (&buffer->info[idx - 1]) ||
!_hb_glyph_info_multiplied (&buffer->info[idx - 1]) ||
_hb_glyph_info_get_lig_id (&buffer->info[idx]) !=
_hb_glyph_info_get_lig_id (&buffer->info[idx - 1]) ||
_hb_glyph_info_get_lig_comp (&buffer->info[idx]) !=
_hb_glyph_info_get_lig_comp (&buffer->info[idx - 1]) + 1
);
}
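
A simplified, self-contained model of accept () above, with a plain struct in place of hb_glyph_info_t and its lig_id/lig_comp accessors: only the first component of a MultipleSubst-expanded sequence takes a mark, unless the preceding glyph is itself a mark:

```
#include <cstdio>

/* Hypothetical mirror of the buffer info fields consulted above. */
struct info_t { bool multiplied; bool is_mark; unsigned lig_id, lig_comp; };

static bool accept (const info_t *buf, unsigned idx)
{
  return !buf[idx].multiplied ||
         buf[idx].lig_comp == 0 ||
         (idx == 0 ||
          buf[idx - 1].is_mark ||
          !buf[idx - 1].multiplied ||
          buf[idx].lig_id != buf[idx - 1].lig_id ||
          buf[idx].lig_comp != buf[idx - 1].lig_comp + 1);
}

int main ()
{
  /* Two components of one MultipleSubst sequence (same lig_id). */
  info_t buf[] = { { true, false, 7, 1 }, { true, false, 7, 2 } };
  printf ("%d %d\n", accept (buf, 0), accept (buf, 1));  /* 1 0 */
  return 0;
}
```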
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
@ -96,47 +116,54 @@ struct MarkBasePosFormat1
unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
if (likely (mark_index == NOT_COVERED)) return_trace (false);
/* Now we search backwards for a non-mark glyph */
/* Now we search backwards for a non-mark glyph.
* We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
do {
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
/* We only want to attach to the first of a MultipleSubst sequence.
* https://github.com/harfbuzz/harfbuzz/issues/740
* Reject others...
* ...but stop if we find a mark in the MultipleSubst sequence:
* https://github.com/harfbuzz/harfbuzz/issues/1020 */
if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) ||
0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) ||
(skippy_iter.idx == 0 ||
_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) ||
_hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) !=
_hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) ||
_hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) !=
_hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1
))
break;
skippy_iter.reject ();
} while (true);
/* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); }
unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint);
if (base_index == NOT_COVERED)
if (c->last_base_until > buffer->idx)
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
c->last_base_until = 0;
c->last_base = -1;
}
unsigned j;
for (j = buffer->idx; j > c->last_base_until; j--)
{
auto match = skippy_iter.match (buffer->info[j - 1]);
if (match == skippy_iter.MATCH)
{
// https://github.com/harfbuzz/harfbuzz/issues/4124
if (!accept (buffer, j - 1) &&
NOT_COVERED == (this+baseCoverage).get_coverage (buffer->info[j - 1].codepoint))
match = skippy_iter.SKIP;
}
if (match == skippy_iter.MATCH)
{
c->last_base = (signed) j - 1;
break;
}
}
c->last_base_until = buffer->idx;
if (c->last_base == -1)
{
buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
return_trace (false);
}
return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx));
unsigned idx = (unsigned) c->last_base;
/* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_base_glyph (&buffer->info[idx])) { return_trace (false); }
unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[idx].codepoint);
if (base_index == NOT_COVERED)
{
buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
return_trace (false);
}
return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, idx));
}
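
A sketch of the last_base memoisation introduced above: searching backwards from every mark is quadratic over a run of marks, so the found base (last_base) and the position the search is valid up to (last_base_until) are cached. The real code also resets the cache when the buffer position moves backwards; this sketch only walks forward:

```
#include <cstdio>
#include <vector>

int main ()
{
  std::vector<bool> is_base = { true, false, false, false, true, false };
  int last_base = -1;            /* cached result of the backward search */
  unsigned last_base_until = 0;  /* position the cache is valid up to */

  for (unsigned idx = 1; idx < (unsigned) is_base.size (); idx++)
  {
    if (is_base[idx]) continue;                 /* only marks search back */
    for (unsigned j = idx; j > last_base_until; j--)
      if (is_base[j - 1]) { last_base = (int) (j - 1); break; }
    last_base_until = idx;                      /* never rescan this span */
    printf ("mark at %u attaches to base %d\n", idx, last_base);
  }
  return 0;
}
```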
bool subset (hb_subset_context_t *c) const

View File

@ -11,18 +11,24 @@ struct MarkLigPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkLigPosFormat1 format1;
HBUINT16 format; /* Format identifier */
MarkLigPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkLigPosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}

View File

@ -1,72 +1,34 @@
#ifndef OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH
#include "LigatureArray.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef AnchorMatrix LigatureAttach; /* component-major--
* in order of writing direction--,
* mark-minor--
* ordered by class--zero-based. */
/* Array of LigatureAttach tables ordered by LigatureCoverage Index */
struct LigatureArray : List16OfOffset16To<LigatureAttach>
{
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool subset (hb_subset_context_t *c,
Iterator coverage,
unsigned class_count,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
{
auto *matrix = out->serialize_append (c->serializer);
if (unlikely (!matrix)) return_trace (false);
const LigatureAttach& src = (this + _.second);
auto indexes =
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
;
matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
}
return_trace (this->len);
}
};
struct MarkLigPosFormat1
template <typename Types>
struct MarkLigPosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
markCoverage; /* Offset to Mark Coverage table--from
* beginning of MarkLigPos subtable */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
ligatureCoverage; /* Offset to Ligature Coverage
* table--from beginning of MarkLigPos
* subtable */
HBUINT16 classCount; /* Number of defined mark classes */
Offset16To<MarkArray>
typename Types::template OffsetTo<MarkArray>
markArray; /* Offset to MarkArray table--from
* beginning of MarkLigPos subtable */
Offset16To<LigatureArray>
typename Types::template OffsetTo<LigatureArray>
ligatureArray; /* Offset to LigatureArray table--from
* beginning of MarkLigPos subtable */
public:
DEFINE_SIZE_STATIC (12);
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -138,20 +100,37 @@ struct MarkLigPosFormat1
if (likely (mark_index == NOT_COVERED)) return_trace (false);
/* Now we search backwards for a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
if (c->last_base_until > buffer->idx)
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
c->last_base_until = 0;
c->last_base = -1;
}
unsigned j;
for (j = buffer->idx; j > c->last_base_until; j--)
{
auto match = skippy_iter.match (buffer->info[j - 1]);
if (match == skippy_iter.MATCH)
{
c->last_base = (signed) j - 1;
break;
}
}
c->last_base_until = buffer->idx;
if (c->last_base == -1)
{
buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
return_trace (false);
}
/* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); }
j = (unsigned) c->last_base;
/* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_ligature (&buffer->info[j])) { return_trace (false); }
unsigned int j = skippy_iter.idx;
unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint);
if (lig_index == NOT_COVERED)
{

View File

@ -11,18 +11,24 @@ struct MarkMarkPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkMarkPosFormat1 format1;
HBUINT16 format; /* Format identifier */
MarkMarkPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkMarkPosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}

View File

@ -12,27 +12,28 @@ typedef AnchorMatrix Mark2Array; /* mark2-major--
* mark1-minor--
* ordered by class--zero-based. */
struct MarkMarkPosFormat1
template <typename Types>
struct MarkMarkPosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
mark1Coverage; /* Offset to Combining Mark1 Coverage
* table--from beginning of MarkMarkPos
* subtable */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
mark2Coverage; /* Offset to Combining Mark2 Coverage
* table--from beginning of MarkMarkPos
* subtable */
HBUINT16 classCount; /* Number of defined mark classes */
Offset16To<MarkArray>
typename Types::template OffsetTo<MarkArray>
mark1Array; /* Offset to Mark1Array table--from
* beginning of MarkMarkPos subtable */
Offset16To<Mark2Array>
typename Types::template OffsetTo<Mark2Array>
mark2Array; /* Offset to Mark2Array table--from
* beginning of MarkMarkPos subtable */
public:
DEFINE_SIZE_STATIC (12);
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -100,7 +101,7 @@ struct MarkMarkPosFormat1
/* now we search backwards for a suitable mark glyph until a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.set_lookup_props (c->lookup_props & ~LookupFlag::IgnoreFlags);
skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
{

View File

@ -9,7 +9,7 @@ struct MarkRecord
{
friend struct MarkArray;
protected:
public:
HBUINT16 klass; /* Class defined for this mark */
Offset16To<Anchor>
markAnchor; /* Offset to Anchor table--from

View File

@ -12,20 +12,28 @@ struct PairPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
PairPosFormat1 format1;
PairPosFormat2 format2;
HBUINT16 format; /* Format identifier */
PairPosFormat1_3<SmallTypes> format1;
PairPosFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
PairPosFormat1_3<MediumTypes> format3;
PairPosFormat2_4<MediumTypes> format4;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
case 4: return_trace (c->dispatch (u.format4, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}

View File

@ -1,248 +1,22 @@
#ifndef OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH
#include "PairSet.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct PairValueRecord
template <typename Types>
struct PairPosFormat1_3
{
friend struct PairSet;
using PairSet = GPOS_impl::PairSet<Types>;
using PairValueRecord = GPOS_impl::PairValueRecord<Types>;
int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }
struct context_t
{
const void *base;
const ValueFormat *valueFormats;
const ValueFormat *newFormats;
unsigned len1; /* valueFormats[0].get_len() */
const hb_map_t *glyph_map;
const hb_map_t *layout_variation_idx_map;
};
bool subset (hb_subset_context_t *c,
context_t *closure) const
{
TRACE_SERIALIZE (this);
auto *s = c->serializer;
auto *out = s->start_embed (*this);
if (unlikely (!s->extend_min (out))) return_trace (false);
out->secondGlyph = (*closure->glyph_map)[secondGlyph];
closure->valueFormats[0].copy_values (s,
closure->newFormats[0],
closure->base, &values[0],
closure->layout_variation_idx_map);
closure->valueFormats[1].copy_values (s,
closure->newFormats[1],
closure->base,
&values[closure->len1],
closure->layout_variation_idx_map);
return_trace (true);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats,
const void *base) const
{
unsigned record1_len = valueFormats[0].get_len ();
unsigned record2_len = valueFormats[1].get_len ();
const hb_array_t<const Value> values_array = values.as_array (record1_len + record2_len);
if (valueFormats[0].has_device ())
valueFormats[0].collect_variation_indices (c, base, values_array.sub_array (0, record1_len));
if (valueFormats[1].has_device ())
valueFormats[1].collect_variation_indices (c, base, values_array.sub_array (record1_len, record2_len));
}
bool intersects (const hb_set_t& glyphset) const
{
return glyphset.has(secondGlyph);
}
const Value* get_values_1 () const
{
return &values[0];
}
const Value* get_values_2 (ValueFormat format1) const
{
return &values[format1.get_len ()];
}
protected:
HBGlyphID16 secondGlyph; /* GlyphID of second glyph in the
* pair--first glyph is listed in the
* Coverage table */
ValueRecord values; /* Positioning data for the first glyph
* followed by the data for the second glyph */
public:
DEFINE_SIZE_ARRAY (2, values);
};
struct PairSet
{
friend struct PairPosFormat1;
bool intersects (const hb_set_t *glyphs,
const ValueFormat *valueFormats) const
{
unsigned int len1 = valueFormats[0].get_len ();
unsigned int len2 = valueFormats[1].get_len ();
unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2);
const PairValueRecord *record = &firstPairValueRecord;
unsigned int count = len;
for (unsigned int i = 0; i < count; i++)
{
if (glyphs->has (record->secondGlyph))
return true;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
return false;
}
void collect_glyphs (hb_collect_glyphs_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned int len1 = valueFormats[0].get_len ();
unsigned int len2 = valueFormats[1].get_len ();
unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2);
const PairValueRecord *record = &firstPairValueRecord;
c->input->add_array (&record->secondGlyph, len, record_size);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
unsigned record_size = HBUINT16::static_size * (1 + len1 + len2);
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len;
for (unsigned i = 0; i < count; i++)
{
if (c->glyph_set->has (record->secondGlyph))
{ record->collect_variation_indices (c, valueFormats, this); }
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
}
bool apply (hb_ot_apply_context_t *c,
const ValueFormat *valueFormats,
unsigned int pos) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int len1 = valueFormats[0].get_len ();
unsigned int len2 = valueFormats[1].get_len ();
unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2);
const PairValueRecord *record = hb_bsearch (buffer->info[pos].codepoint,
&firstPairValueRecord,
len,
record_size);
if (record)
{
bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
if (applied_first || applied_second)
buffer->unsafe_to_break (buffer->idx, pos + 1);
if (len2)
pos++;
buffer->idx = pos;
return_trace (true);
}
buffer->unsafe_to_concat (buffer->idx, pos + 1);
return_trace (false);
}
bool subset (hb_subset_context_t *c,
const ValueFormat valueFormats[2],
const ValueFormat newFormats[2]) const
{
TRACE_SUBSET (this);
auto snap = c->serializer->snapshot ();
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->len = 0;
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2);
PairValueRecord::context_t context =
{
this,
valueFormats,
newFormats,
len1,
&glyph_map,
c->plan->layout_variation_idx_map
};
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len, num = 0;
for (unsigned i = 0; i < count; i++)
{
if (glyphset.has (record->secondGlyph)
&& record->subset (c, &context)) num++;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
out->len = num;
if (!num) c->serializer->revert (snap);
return_trace (num);
}
struct sanitize_closure_t
{
const ValueFormat *valueFormats;
unsigned int len1; /* valueFormats[0].get_len() */
unsigned int stride; /* 1 + len1 + len2 */
};
bool sanitize (hb_sanitize_context_t *c, const sanitize_closure_t *closure) const
{
TRACE_SANITIZE (this);
if (!(c->check_struct (this)
&& c->check_range (&firstPairValueRecord,
len,
HBUINT16::static_size,
closure->stride))) return_trace (false);
unsigned int count = len;
const PairValueRecord *record = &firstPairValueRecord;
return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride));
}
protected:
HBUINT16 len; /* Number of PairValueRecords */
PairValueRecord firstPairValueRecord;
/* Array of PairValueRecords--ordered
* by GlyphID of the second glyph */
public:
DEFINE_SIZE_MIN (2);
};
struct PairPosFormat1
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat[2]; /* [0] Defines the types of data in
@ -251,11 +25,11 @@ struct PairPosFormat1
/* [1] Defines the types of data in
* ValueRecord2--for the second glyph
* in the pair--may be zero (0) */
Array16OfOffset16To<PairSet>
Array16Of<typename Types::template OffsetTo<PairSet>>
pairSet; /* Array of PairSet tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (10, pairSet);
DEFINE_SIZE_ARRAY (8 + Types::size, pairSet);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -265,24 +39,36 @@ struct PairPosFormat1
unsigned int len1 = valueFormat[0].get_len ();
unsigned int len2 = valueFormat[1].get_len ();
PairSet::sanitize_closure_t closure =
typename PairSet::sanitize_closure_t closure =
{
valueFormat,
len1,
1 + len1 + len2
PairSet::get_size (len1, len2)
};
return_trace (coverage.sanitize (c, this) && pairSet.sanitize (c, this, &closure));
}
bool intersects (const hb_set_t *glyphs) const
{
auto &cov = this+coverage;
if (pairSet.len > glyphs->get_population () * hb_bit_storage ((unsigned) pairSet.len) / 4)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
{
unsigned i = cov.get_coverage (g);
if ((this+pairSet[i]).intersects (glyphs, valueFormat))
return true;
}
return false;
}
return
+ hb_zip (this+coverage, pairSet)
+ hb_zip (cov, pairSet)
| hb_filter (*glyphs, hb_first)
| hb_map (hb_second)
| hb_map ([glyphs, this] (const Offset16To<PairSet> &_)
| hb_map ([glyphs, this] (const typename Types::template OffsetTo<PairSet> &_)
{ return (this+_).intersects (glyphs, valueFormat); })
| hb_any
;
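
The new branch in intersects () above picks between two strategies on a cost estimate: probing each glyph of the set through Coverage (roughly logarithmic per probe) versus zipping coverage with pairSet (linear in pairSet.len). A sketch of the heuristic, with a local stand-in for hb_bit_storage:

```
#include <cstdio>

/* Local stand-in for hb_bit_storage: bits needed to represent v. */
static unsigned bit_storage (unsigned v)
{ unsigned n = 0; while (v) { v >>= 1; n++; } return n; }

int main ()
{
  unsigned pair_set_len = 10000, population = 100;
  /* Probe the glyph set through Coverage only when population * log2(n)
   * beats a linear zip over pairSet (the /4 factor is the original
   * code's fudge). */
  bool probe = pair_set_len > population * bit_storage (pair_set_len) / 4;
  printf ("per-glyph probing pays off: %s\n", probe ? "yes" : "no");  /* yes */
  return 0;
}
```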
@ -354,11 +140,17 @@ struct PairPosFormat1
out->valueFormat[1] = newFormats.second;
}
if (c->plan->all_axes_pinned)
{
out->valueFormat[0] = out->valueFormat[0].drop_device_table_flags ();
out->valueFormat[1] = out->valueFormat[1].drop_device_table_flags ();
}
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, pairSet)
| hb_filter (glyphset, hb_first)
| hb_filter ([this, c, out] (const Offset16To<PairSet>& _)
| hb_filter ([this, c, out] (const typename Types::template OffsetTo<PairSet>& _)
{
auto snap = c->serializer->snapshot ();
auto *o = out->pairSet.serialize_append (c->serializer);
@ -385,19 +177,21 @@ struct PairPosFormat1
hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_set_t& glyphset) const
{
unsigned len1 = valueFormat[0].get_len ();
unsigned len2 = valueFormat[1].get_len ();
unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2);
unsigned record_size = PairSet::get_size (valueFormat);
unsigned format1 = 0;
unsigned format2 = 0;
for (const Offset16To<PairSet>& _ :
+ hb_zip (this+coverage, pairSet) | hb_filter (glyphset, hb_first) | hb_map (hb_second))
for (const auto & _ :
+ hb_zip (this+coverage, pairSet)
| hb_filter (glyphset, hb_first)
| hb_map (hb_second)
)
{
const PairSet& set = (this + _);
const PairValueRecord *record = &set.firstPairValueRecord;
for (unsigned i = 0; i < set.len; i++)
unsigned count = set.len;
for (unsigned i = 0; i < count; i++)
{
if (record->intersects (glyphset))
{
@ -406,6 +200,9 @@ struct PairPosFormat1
}
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
if (format1 == valueFormat[0] && format2 == valueFormat[1])
break;
}
return hb_pair (format1, format2);

View File

@ -7,11 +7,12 @@ namespace OT {
namespace Layout {
namespace GPOS_impl {
struct PairPosFormat2
template <typename Types>
struct PairPosFormat2_4
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat1; /* ValueRecord definition--for the
@ -20,11 +21,11 @@ struct PairPosFormat2
ValueFormat valueFormat2; /* ValueRecord definition--for the
* second glyph of the pair--may be
* zero (0) */
Offset16To<ClassDef>
typename Types::template OffsetTo<ClassDef>
classDef1; /* Offset to ClassDef table--from
* beginning of PairPos subtable--for
* the first glyph of the pair */
Offset16To<ClassDef>
typename Types::template OffsetTo<ClassDef>
classDef2; /* Offset to ClassDef table--from
* beginning of PairPos subtable--for
* the second glyph of the pair */
@ -36,7 +37,7 @@ struct PairPosFormat2
* class1-major, class2-minor,
* Each entry has value1 and value2 */
public:
DEFINE_SIZE_ARRAY (16, values);
DEFINE_SIZE_ARRAY (10 + 3 * Types::size, values);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -48,7 +49,7 @@ struct PairPosFormat2
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int stride = len1 + len2;
unsigned int stride = HBUINT16::static_size * (len1 + len2);
unsigned int record_size = valueFormat1.get_size () + valueFormat2.get_size ();
unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count;
return_trace (c->check_range ((const void *) values,
@ -216,10 +217,31 @@ struct PairPosFormat2
}
bail:
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"try kerning glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
applied_first = valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"kerned glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"tried kerning glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
success:
if (applied_first || applied_second)
buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1);
@ -227,10 +249,15 @@ struct PairPosFormat2
boring:
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
if (len2)
{
skippy_iter.idx++;
// https://github.com/harfbuzz/harfbuzz/issues/3824
// https://github.com/harfbuzz/harfbuzz/issues/3888#issuecomment-1326781116
buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1);
}
buffer->idx = skippy_iter.idx;
if (len2)
buffer->idx++;
return_trace (true);
}
@ -260,13 +287,19 @@ struct PairPosFormat2
out->valueFormat1 = newFormats.first;
out->valueFormat2 = newFormats.second;
if (c->plan->all_axes_pinned)
{
out->valueFormat1 = out->valueFormat1.drop_device_table_flags ();
out->valueFormat2 = out->valueFormat2.drop_device_table_flags ();
}
for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map))
{
for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map))
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2);
valueFormat1.copy_values (c->serializer, newFormats.first, this, &values[idx], c->plan->layout_variation_idx_map);
valueFormat2.copy_values (c->serializer, newFormats.second, this, &values[idx + len1], c->plan->layout_variation_idx_map);
valueFormat1.copy_values (c->serializer, out->valueFormat1, this, &values[idx], &c->plan->layout_variation_idx_delta_map);
valueFormat2.copy_values (c->serializer, out->valueFormat2, this, &values[idx + len1], &c->plan->layout_variation_idx_delta_map);
}
}
@ -289,6 +322,7 @@ struct PairPosFormat2
{
unsigned len1 = valueFormat1.get_len ();
unsigned len2 = valueFormat2.get_len ();
unsigned record_size = len1 + len2;
unsigned format1 = 0;
unsigned format2 = 0;
@ -297,10 +331,13 @@ struct PairPosFormat2
{
for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map))
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2);
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * record_size;
format1 = format1 | valueFormat1.get_effective_format (&values[idx]);
format2 = format2 | valueFormat2.get_effective_format (&values[idx + len1]);
}
if (format1 == valueFormat1 && format2 == valueFormat2)
break;
}
return hb_pair (format1, format2);

View File

@ -0,0 +1,207 @@
#ifndef OT_LAYOUT_GPOS_PAIRSET_HH
#define OT_LAYOUT_GPOS_PAIRSET_HH
#include "PairValueRecord.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairSet
{
template <typename Types2>
friend struct PairPosFormat1_3;
using PairValueRecord = GPOS_impl::PairValueRecord<Types>;
protected:
HBUINT16 len; /* Number of PairValueRecords */
PairValueRecord firstPairValueRecord;
/* Array of PairValueRecords--ordered
* by GlyphID of the second glyph */
public:
DEFINE_SIZE_MIN (2);
static unsigned get_size (unsigned len1, unsigned len2)
{
return Types::HBGlyphID::static_size + Value::static_size * (len1 + len2);
}
static unsigned get_size (const ValueFormat valueFormats[2])
{
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
return get_size (len1, len2);
}
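
A worked instance of the get_size arithmetic above, assuming SmallTypes (2-byte glyph ids) and the 2-byte Value slots; the chosen value-format lengths are hypothetical:

```
#include <cstdio>

int main ()
{
  /* Hypothetical formats: first-glyph record carries X_PLACEMENT and
   * X_ADVANCE (len1 = 2), second-glyph record X_ADVANCE only (len2 = 1). */
  unsigned glyph_id_size = 2;   /* SmallTypes glyph id */
  unsigned value_size    = 2;   /* Value::static_size */
  unsigned len1 = 2, len2 = 1;
  printf ("PairValueRecord size = %u bytes\n",
          glyph_id_size + value_size * (len1 + len2));    /* 8 */
  return 0;
}
```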
struct sanitize_closure_t
{
const ValueFormat *valueFormats;
unsigned int len1; /* valueFormats[0].get_len() */
unsigned int stride; /* bytes */
};
bool sanitize (hb_sanitize_context_t *c, const sanitize_closure_t *closure) const
{
TRACE_SANITIZE (this);
if (!(c->check_struct (this)
&& c->check_range (&firstPairValueRecord,
len,
closure->stride))) return_trace (false);
unsigned int count = len;
const PairValueRecord *record = &firstPairValueRecord;
return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride));
}
bool intersects (const hb_set_t *glyphs,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
unsigned int count = len;
for (unsigned int i = 0; i < count; i++)
{
if (glyphs->has (record->secondGlyph))
return true;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
return false;
}
void collect_glyphs (hb_collect_glyphs_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
c->input->add_array (&record->secondGlyph, len, record_size);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len;
for (unsigned i = 0; i < count; i++)
{
if (c->glyph_set->has (record->secondGlyph))
{ record->collect_variation_indices (c, valueFormats, this); }
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
}
bool apply (hb_ot_apply_context_t *c,
const ValueFormat *valueFormats,
unsigned int pos) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int len1 = valueFormats[0].get_len ();
unsigned int len2 = valueFormats[1].get_len ();
unsigned record_size = get_size (len1, len2);
const PairValueRecord *record = hb_bsearch (buffer->info[pos].codepoint,
&firstPairValueRecord,
len,
record_size);
if (record)
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"try kerning glyphs at %u,%u",
c->buffer->idx, pos);
}
bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"kerned glyphs at %u,%u",
c->buffer->idx, pos);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"tried kerning glyphs at %u,%u",
c->buffer->idx, pos);
}
if (applied_first || applied_second)
buffer->unsafe_to_break (buffer->idx, pos + 1);
if (len2)
{
pos++;
// https://github.com/harfbuzz/harfbuzz/issues/3824
// https://github.com/harfbuzz/harfbuzz/issues/3888#issuecomment-1326781116
buffer->unsafe_to_break (buffer->idx, pos + 1);
}
buffer->idx = pos;
return_trace (true);
}
buffer->unsafe_to_concat (buffer->idx, pos + 1);
return_trace (false);
}
bool subset (hb_subset_context_t *c,
const ValueFormat valueFormats[2],
const ValueFormat newFormats[2]) const
{
TRACE_SUBSET (this);
auto snap = c->serializer->snapshot ();
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->len = 0;
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
unsigned record_size = get_size (len1, len2);
typename PairValueRecord::context_t context =
{
this,
valueFormats,
newFormats,
len1,
&glyph_map,
&c->plan->layout_variation_idx_delta_map
};
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len, num = 0;
for (unsigned i = 0; i < count; i++)
{
if (glyphset.has (record->secondGlyph)
&& record->subset (c, &context)) num++;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
out->len = num;
if (!num) c->serializer->revert (snap);
return_trace (num);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRSET_HH

View File

@ -0,0 +1,99 @@
#ifndef OT_LAYOUT_GPOS_PAIRVALUERECORD_HH
#define OT_LAYOUT_GPOS_PAIRVALUERECORD_HH
#include "ValueFormat.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairValueRecord
{
template <typename Types2>
friend struct PairSet;
protected:
typename Types::HBGlyphID
secondGlyph; /* GlyphID of second glyph in the
* pair--first glyph is listed in the
* Coverage table */
ValueRecord values; /* Positioning data for the first glyph
* followed by the data for the second glyph */
public:
DEFINE_SIZE_ARRAY (Types::size, values);
int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }
struct context_t
{
const void *base;
const ValueFormat *valueFormats;
const ValueFormat *newFormats;
unsigned len1; /* valueFormats[0].get_len() */
const hb_map_t *glyph_map;
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map;
};
bool subset (hb_subset_context_t *c,
context_t *closure) const
{
TRACE_SERIALIZE (this);
auto *s = c->serializer;
auto *out = s->start_embed (*this);
if (unlikely (!s->extend_min (out))) return_trace (false);
out->secondGlyph = (*closure->glyph_map)[secondGlyph];
closure->valueFormats[0].copy_values (s,
closure->newFormats[0],
closure->base, &values[0],
closure->layout_variation_idx_delta_map);
closure->valueFormats[1].copy_values (s,
closure->newFormats[1],
closure->base,
&values[closure->len1],
closure->layout_variation_idx_delta_map);
return_trace (true);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats,
const void *base) const
{
unsigned record1_len = valueFormats[0].get_len ();
unsigned record2_len = valueFormats[1].get_len ();
const hb_array_t<const Value> values_array = values.as_array (record1_len + record2_len);
if (valueFormats[0].has_device ())
valueFormats[0].collect_variation_indices (c, base, values_array.sub_array (0, record1_len));
if (valueFormats[1].has_device ())
valueFormats[1].collect_variation_indices (c, base, values_array.sub_array (record1_len, record2_len));
}
bool intersects (const hb_set_t& glyphset) const
{
return glyphset.has(secondGlyph);
}
const Value* get_values_1 () const
{
return &values[0];
}
const Value* get_values_2 (ValueFormat format1) const
{
return &values[format1.get_len ()];
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRVALUERECORD_HH

View File

@ -38,17 +38,18 @@ struct SinglePos
void serialize (hb_serialize_context_t *c,
const SrcLookup* src,
Iterator glyph_val_iter_pairs,
const hb_map_t *layout_variation_idx_map)
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
bool all_axes_pinned)
{
if (unlikely (!c->extend_min (u.format))) return;
unsigned format = 2;
ValueFormat new_format = src->get_value_format ();
if (all_axes_pinned)
new_format = new_format.drop_device_table_flags ();
if (glyph_val_iter_pairs)
{
format = get_format (glyph_val_iter_pairs);
new_format = src->get_value_format ().get_effective_format (+ glyph_val_iter_pairs | hb_map (hb_second));
}
u.format = format;
switch (u.format) {
@ -56,13 +57,13 @@ struct SinglePos
src,
glyph_val_iter_pairs,
new_format,
layout_variation_idx_map);
layout_variation_idx_delta_map);
return;
case 2: u.format2.serialize (c,
src,
glyph_val_iter_pairs,
new_format,
layout_variation_idx_map);
layout_variation_idx_delta_map);
return;
default:return;
}
@ -71,8 +72,8 @@ struct SinglePos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@ -87,8 +88,9 @@ static void
SinglePos_serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
const hb_map_t *layout_variation_idx_map)
{ c->start_embed<SinglePos> ()->serialize (c, src, it, layout_variation_idx_map); }
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
bool all_axes_pinned)
{ c->start_embed<SinglePos> ()->serialize (c, src, it, layout_variation_idx_delta_map, all_axes_pinned); }
}

View File

@ -39,12 +39,10 @@ struct SinglePosFormat1
{
if (!valueFormat.has_device ()) return;
auto it =
+ hb_iter (this+coverage)
| hb_filter (c->glyph_set)
;
hb_set_t intersection;
(this+coverage).intersect_set (*c->glyph_set, intersection);
if (!intersection) return;
if (!it) return;
valueFormat.collect_variation_indices (c, this, values.as_array (valueFormat.get_len ()));
}
@ -62,12 +60,44 @@ struct SinglePosFormat1
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
if (likely (index == NOT_COVERED)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioning glyph at %u",
c->buffer->idx);
}
valueFormat.apply_value (c, this, values, buffer->cur_pos());
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioned glyph at %u",
c->buffer->idx);
}
buffer->idx++;
return_trace (true);
}
bool
position_single (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
{
unsigned int index = (this+coverage).get_coverage (gid);
if (likely (index == NOT_COVERED)) return false;
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
valueFormat.apply_value (&c, this, values, pos);
return true;
}
template<typename Iterator,
typename SrcLookup,
hb_requires (hb_is_iterator (Iterator))>
@ -75,7 +105,7 @@ struct SinglePosFormat1
const SrcLookup *src,
Iterator it,
ValueFormat newFormat,
const hb_map_t *layout_variation_idx_map)
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map)
{
if (unlikely (!c->extend_min (this))) return;
if (unlikely (!c->check_assign (valueFormat,
@ -84,7 +114,7 @@ struct SinglePosFormat1
for (const hb_array_t<const Value>& _ : + it | hb_map (hb_second))
{
src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map);
src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_delta_map);
// Only serialize the first entry in the iterator, the rest are assumed to
// be the same.
break;
@ -104,15 +134,17 @@ struct SinglePosFormat1
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_set_t intersection;
(this+coverage).intersect_set (glyphset, intersection);
auto it =
+ hb_iter (this+coverage)
| hb_filter (glyphset)
+ hb_iter (intersection)
| hb_map_retains_sorting (glyph_map)
| hb_zip (hb_repeat (values.as_array (valueFormat.get_len ())))
;
bool ret = bool (it);
SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map);
SinglePos_serialize (c->serializer, this, it, &c->plan->layout_variation_idx_delta_map, c->plan->all_axes_pinned);
return_trace (ret);
}
};

View File

@ -68,16 +68,52 @@ struct SinglePosFormat2
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
if (likely (index == NOT_COVERED)) return_trace (false);
if (likely (index >= valueCount)) return_trace (false);
if (unlikely (index >= valueCount)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioning glyph at %u",
c->buffer->idx);
}
valueFormat.apply_value (c, this,
&values[index * valueFormat.get_len ()],
buffer->cur_pos());
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioned glyph at %u",
c->buffer->idx);
}
buffer->idx++;
return_trace (true);
}
bool
position_single (hb_font_t *font,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
{
unsigned int index = (this+coverage).get_coverage (gid);
if (likely (index == NOT_COVERED)) return false;
if (unlikely (index >= valueCount)) return false;
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
valueFormat.apply_value (&c, this,
&values[index * valueFormat.get_len ()],
pos);
return true;
}
template<typename Iterator,
typename SrcLookup,
hb_requires (hb_is_iterator (Iterator))>
@ -85,7 +121,7 @@ struct SinglePosFormat2
const SrcLookup *src,
Iterator it,
ValueFormat newFormat,
const hb_map_t *layout_variation_idx_map)
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map)
{
auto out = c->extend_min (this);
if (unlikely (!out)) return;
@ -95,7 +131,7 @@ struct SinglePosFormat2
+ it
| hb_map (hb_second)
| hb_apply ([&] (hb_array_t<const Value> _)
{ src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map); })
{ src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_delta_map); })
;
auto glyphs =
@ -127,7 +163,7 @@ struct SinglePosFormat2
;
bool ret = bool (it);
SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map);
SinglePos_serialize (c->serializer, this, it, &c->plan->layout_variation_idx_delta_map, c->plan->all_axes_pinned);
return_trace (ret);
}
};

View File

@ -59,6 +59,24 @@ struct ValueFormat : HBUINT16
unsigned int get_len () const { return hb_popcount ((unsigned int) *this); }
unsigned int get_size () const { return get_len () * Value::static_size; }
hb_vector_t<unsigned> get_device_table_indices () const {
unsigned i = 0;
hb_vector_t<unsigned> result;
unsigned format = *this;
if (format & xPlacement) i++;
if (format & yPlacement) i++;
if (format & xAdvance) i++;
if (format & yAdvance) i++;
if (format & xPlaDevice) result.push (i++);
if (format & yPlaDevice) result.push (i++);
if (format & xAdvDevice) result.push (i++);
if (format & yAdvDevice) result.push (i++);
return result;
}
bool apply_value (hb_ot_apply_context_t *c,
const void *base,
const Value *values,
@ -145,30 +163,50 @@ struct ValueFormat : HBUINT16
unsigned int new_format,
const void *base,
const Value *values,
const hb_map_t *layout_variation_idx_map) const
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map) const
{
unsigned int format = *this;
if (!format) return;
if (format & xPlacement) copy_value (c, new_format, xPlacement, *values++);
if (format & yPlacement) copy_value (c, new_format, yPlacement, *values++);
if (format & xAdvance) copy_value (c, new_format, xAdvance, *values++);
if (format & yAdvance) copy_value (c, new_format, yAdvance, *values++);
HBINT16 *x_placement = nullptr, *y_placement = nullptr, *x_adv = nullptr, *y_adv = nullptr;
if (format & xPlacement) x_placement = copy_value (c, new_format, xPlacement, *values++);
if (format & yPlacement) y_placement = copy_value (c, new_format, yPlacement, *values++);
if (format & xAdvance) x_adv = copy_value (c, new_format, xAdvance, *values++);
if (format & yAdvance) y_adv = copy_value (c, new_format, yAdvance, *values++);
if (format & xPlaDevice) copy_device (c, base, values++, layout_variation_idx_map);
if (format & yPlaDevice) copy_device (c, base, values++, layout_variation_idx_map);
if (format & xAdvDevice) copy_device (c, base, values++, layout_variation_idx_map);
if (format & yAdvDevice) copy_device (c, base, values++, layout_variation_idx_map);
if (format & xPlaDevice)
{
add_delta_to_value (x_placement, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, xPlaDevice);
}
if (format & yPlaDevice)
{
add_delta_to_value (y_placement, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, yPlaDevice);
}
if (format & xAdvDevice)
{
add_delta_to_value (x_adv, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, xAdvDevice);
}
if (format & yAdvDevice)
{
add_delta_to_value (y_adv, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, yAdvDevice);
}
}
void copy_value (hb_serialize_context_t *c,
unsigned int new_format,
Flags flag,
Value value) const
HBINT16* copy_value (hb_serialize_context_t *c,
unsigned int new_format,
Flags flag,
Value value) const
{
// Filter by new format.
if (!(new_format & flag)) return;
c->copy (value);
if (!(new_format & flag)) return nullptr;
return reinterpret_cast<HBINT16 *> (c->copy (value));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
@ -183,31 +221,40 @@ struct ValueFormat : HBUINT16
if (format & yAdvance) i++;
if (format & xPlaDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices);
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yPlaDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices);
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::xAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices);
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices);
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
}
unsigned drop_device_table_flags () const
{
unsigned format = *this;
for (unsigned flag = xPlaDevice; flag <= yAdvDevice; flag = flag << 1)
format = format & ~flag;
return format;
}
private:
bool sanitize_value_devices (hb_sanitize_context_t *c, const void *base, const Value *values) const
{
@ -236,9 +283,27 @@ struct ValueFormat : HBUINT16
return *static_cast<const Offset16To<Device> *> (value);
}
bool copy_device (hb_serialize_context_t *c, const void *base,
const Value *src_value, const hb_map_t *layout_variation_idx_map) const
void add_delta_to_value (HBINT16 *value,
const void *base,
const Value *src_value,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map) const
{
if (!value) return;
unsigned varidx = (base + get_device (src_value)).get_variation_index ();
hb_pair_t<unsigned, int> *varidx_delta;
if (!layout_variation_idx_delta_map->has (varidx, &varidx_delta)) return;
*value += hb_second (*varidx_delta);
}
bool copy_device (hb_serialize_context_t *c, const void *base,
const Value *src_value,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned int new_format, Flags flag) const
{
// Filter by new format.
if (!(new_format & flag)) return true;
Value *dst_value = c->copy (*src_value);
if (!dst_value) return false;
@ -246,7 +311,7 @@ struct ValueFormat : HBUINT16
*dst_value = 0;
c->push ();
if ((base + get_device (src_value)).copy (c, layout_variation_idx_map))
if ((base + get_device (src_value)).copy (c, layout_variation_idx_delta_map))
{
c->add_link (*dst_value, c->pop_pack ());
return true;
@ -306,7 +371,7 @@ struct ValueFormat : HBUINT16
for (unsigned int i = 0; i < count; i++) {
if (!sanitize_value_devices (c, base, values))
return_trace (false);
values += stride;
values = &StructAtOffset<const Value> (values, stride);
}
return_trace (true);
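
`ValueFormat` is a bit set in which every set flag contributes one 16-bit `Value`, so `get_len ()` is simply the popcount of the flags and `get_device_table_indices ()` walks the bits in their fixed order to locate the device-table offsets. A small worked example (bit values as in the OpenType ValueRecord definition; `__builtin_popcount` stands in for `hb_popcount`):

```
#include <cassert>

enum : unsigned {
  xPlacement = 0x0001u, yPlacement = 0x0002u,
  xAdvance   = 0x0004u, yAdvance   = 0x0008u,
  xPlaDevice = 0x0010u, yPlaDevice = 0x0020u,
  xAdvDevice = 0x0040u, yAdvDevice = 0x0080u,
};

int main ()
{
  // xPlacement + xAdvance + xAdvDevice: three 16-bit fields in the record.
  unsigned format = xPlacement | xAdvance | xAdvDevice;
  assert (__builtin_popcount (format) == 3);  // get_len () == 3
  // Field order is fixed: xPlacement sits at index 0, xAdvance at index 1,
  // so get_device_table_indices () would report the device offset at index 2.
  return 0;
}
```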

View File

@ -5,12 +5,13 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
template <typename Types>
struct AlternateSet
{
protected:
Array16Of<HBGlyphID16>
Array16Of<typename Types::HBGlyphID>
alternates; /* Array of alternate GlyphIDs--in
* arbitrary order */
public:
@ -56,8 +57,23 @@ struct AlternateSet
if (unlikely (alt_index > count || alt_index == 0)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (alternate substitution)",
c->buffer->idx);
}
c->replace_glyph (alternates[alt_index - 1]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (alternate substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
@ -68,7 +84,7 @@ struct AlternateSet
{
if (alternates.len && alternate_count)
{
+ alternates.sub_array (start_offset, alternate_count)
+ alternates.as_array ().sub_array (start_offset, alternate_count)
| hb_sink (hb_array (alternate_glyphs, *alternate_count))
;
}

View File

@ -6,28 +6,36 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct AlternateSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
AlternateSubstFormat1 format1;
HBUINT16 format; /* Format identifier */
AlternateSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
AlternateSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
/* TODO This function is unused and not updated to 24bit GIDs. Should be done by using
 * iterators; while at it, perhaps use an iterator of arrays of hb_codepoint_t instead. */
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> alternate_len_list,
@ -42,6 +50,9 @@ struct AlternateSubst
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};
}

View File

@ -6,20 +6,21 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct AlternateSubstFormat1
template <typename Types>
struct AlternateSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16OfOffset16To<AlternateSet>
Array16Of<typename Types::template OffsetTo<AlternateSet<Types>>>
alternateSet; /* Array of AlternateSet tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (6, alternateSet);
DEFINE_SIZE_ARRAY (2 + 2 * Types::size, alternateSet);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -39,9 +40,8 @@ struct AlternateSubstFormat1
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const AlternateSet &_) { _.closure (c); })
| hb_apply ([c] (const AlternateSet<Types> &_) { _.closure (c); })
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
@ -52,7 +52,7 @@ struct AlternateSubstFormat1
+ hb_zip (this+coverage, alternateSet)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const AlternateSet &_) { _.collect_glyphs (c); })
| hb_apply ([c] (const AlternateSet<Types> &_) { _.collect_glyphs (c); })
;
}

View File

@ -7,7 +7,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct ChainContextSubst : ChainContext {};

View File

@ -6,7 +6,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
typedef hb_pair_t<hb_codepoint_t, hb_codepoint_t> hb_codepoint_pair_t;

View File

@ -7,7 +7,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct ContextSubst : Context {};

View File

@ -7,7 +7,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct ExtensionSubst : Extension<ExtensionSubst>
{

View File

@ -1,16 +1,15 @@
#ifndef OT_LAYOUT_GSUB_GSUB_HH
#define OT_LAYOUT_GSUB_GSUB_HH
// TODO(garretrieger): move to new layout.
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
#include "SubstLookup.hh"
using OT::Layout::GSUB::SubstLookup;
namespace OT {
using Layout::GSUB_impl::SubstLookup;
namespace Layout {
namespace GSUB {
/*
* GSUB -- Glyph Substitution
@ -28,12 +27,15 @@ struct GSUB : GSUBGPOS
bool subset (hb_subset_context_t *c) const
{
hb_subset_layout_context_t l (c, tableTag, c->plan->gsub_lookups, c->plan->gsub_langsys, c->plan->gsub_features);
hb_subset_layout_context_t l (c, tableTag);
return GSUBGPOS::subset<SubstLookup> (&l);
}
bool sanitize (hb_sanitize_context_t *c) const
{ return GSUBGPOS::sanitize<SubstLookup> (c); }
{
TRACE_SANITIZE (this);
return_trace (GSUBGPOS::sanitize<SubstLookup> (c));
}
HB_INTERNAL bool is_blocklisted (hb_blob_t *blob,
hb_face_t *face) const;
@ -47,11 +49,10 @@ struct GSUB : GSUBGPOS
};
}
}
struct GSUB_accelerator_t : Layout::GSUB::GSUB::accelerator_t {
GSUB_accelerator_t (hb_face_t *face) : Layout::GSUB::GSUB::accelerator_t (face) {}
struct GSUB_accelerator_t : Layout::GSUB::accelerator_t {
GSUB_accelerator_t (hb_face_t *face) : Layout::GSUB::accelerator_t (face) {}
};

View File

@ -5,18 +5,20 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
template <typename Types>
struct Ligature
{
protected:
HBGlyphID16 ligGlyph; /* GlyphID of ligature to substitute */
HeadlessArrayOf<HBGlyphID16>
typename Types::HBGlyphID
ligGlyph; /* GlyphID of ligature to substitute */
HeadlessArrayOf<typename Types::HBGlyphID>
component; /* Array of component GlyphIDs--start
* with the second component--ordered
* in writing direction */
public:
DEFINE_SIZE_ARRAY (4, component);
DEFINE_SIZE_ARRAY (Types::size + 2, component);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -62,7 +64,24 @@ struct Ligature
* as a "ligated" substitution. */
if (unlikely (count == 1))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (ligature substitution)",
c->buffer->idx);
}
c->replace_glyph (ligGlyph);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (ligature substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
@ -83,6 +102,31 @@ struct Ligature
return_trace (false);
}
unsigned pos = 0;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
unsigned delta = c->buffer->sync_so_far ();
pos = c->buffer->idx;
char buf[HB_MAX_CONTEXT_LENGTH * 16] = {0};
char *p = buf;
match_end += delta;
for (unsigned i = 0; i < count; i++)
{
match_positions[i] += delta;
if (i)
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", match_positions[i]);
p += strlen(p);
}
c->buffer->message (c->font,
"ligating glyphs at %s",
buf);
}
ligate_input (c,
count,
match_positions,
@ -90,6 +134,14 @@ struct Ligature
ligGlyph,
total_component_count);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"ligated glyph at %u",
pos);
}
return_trace (true);
}
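
All of these `message` calls fire only when a client has installed a message callback on the buffer (and the verbose "replacing/replaced" pairs additionally require a build with `HB_BUFFER_MESSAGE_MORE`). A sketch of how a caller would observe the new tracing through the public `hb_buffer_set_message_func ()` API:

```
#include <hb.h>
#include <stdio.h>

/* Print every shaping trace message; returning nonzero lets shaping continue. */
static hb_bool_t
print_message (hb_buffer_t *buffer, hb_font_t *font,
               const char *message, void *user_data)
{
  (void) buffer; (void) font; (void) user_data;
  fprintf (stderr, "hb: %s\n", message);
  return 1;
}

int main (void)
{
  hb_buffer_t *buf = hb_buffer_create ();
  hb_buffer_set_message_func (buf, print_message, NULL, NULL);
  hb_buffer_add_utf8 (buf, "ffi", -1, 0, -1);
  hb_buffer_guess_segment_properties (buf);
  /* Shaping with a real hb_font_t would now emit lines such as
   * "ligating glyphs at 0,1,2" followed by "ligated glyph at 0". */
  hb_buffer_destroy (buf);
  return 0;
}
```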

View File

@ -6,12 +6,13 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
template <typename Types>
struct LigatureSet
{
protected:
Array16OfOffset16To<Ligature>
Array16OfOffset16To<Ligature<Types>>
ligature;		/* Array of Ligature tables,
* ordered by preference */
public:
@ -28,7 +29,7 @@ struct LigatureSet
return
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_map ([glyphs] (const Ligature &_) { return _.intersects (glyphs); })
| hb_map ([glyphs] (const Ligature<Types> &_) { return _.intersects (glyphs); })
| hb_any
;
}
@ -37,7 +38,7 @@ struct LigatureSet
{
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_apply ([c] (const Ligature &_) { _.closure (c); })
| hb_apply ([c] (const Ligature<Types> &_) { _.closure (c); })
;
}
@ -45,7 +46,7 @@ struct LigatureSet
{
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_apply ([c] (const Ligature &_) { _.collect_glyphs (c); })
| hb_apply ([c] (const Ligature<Types> &_) { _.collect_glyphs (c); })
;
}
@ -54,7 +55,7 @@ struct LigatureSet
return
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_map ([c] (const Ligature &_) { return _.would_apply (c); })
| hb_map ([c] (const Ligature<Types> &_) { return _.would_apply (c); })
| hb_any
;
}
@ -65,7 +66,7 @@ struct LigatureSet
unsigned int num_ligs = ligature.len;
for (unsigned int i = 0; i < num_ligs; i++)
{
const Ligature &lig = this+ligature[i];
const auto &lig = this+ligature[i];
if (lig.apply (c)) return_trace (true);
}

View File

@ -6,28 +6,37 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct LigatureSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
LigatureSubstFormat1 format1;
HBUINT16 format; /* Format identifier */
LigatureSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
LigatureSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
/* TODO This function is only used with small GIDs, and has not been updated to 24bit GIDs.
 * Should be done by using iterators; while at it, perhaps use an iterator of arrays of
 * hb_codepoint_t instead. */
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
@ -49,6 +58,9 @@ struct LigatureSubst
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};

View File

@ -6,20 +6,21 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct LigatureSubstFormat1
template <typename Types>
struct LigatureSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16OfOffset16To<LigatureSet>
Array16Of<typename Types::template OffsetTo<LigatureSet<Types>>>
ligatureSet;		/* Array of LigatureSet tables,
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (6, ligatureSet);
DEFINE_SIZE_ARRAY (4 + Types::size, ligatureSet);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -33,7 +34,7 @@ struct LigatureSubstFormat1
+ hb_zip (this+coverage, ligatureSet)
| hb_filter (*glyphs, hb_first)
| hb_map (hb_second)
| hb_map ([this, glyphs] (const Offset16To<LigatureSet> &_)
| hb_map ([this, glyphs] (const typename Types::template OffsetTo<LigatureSet<Types>> &_)
{ return (this+_).intersects (glyphs); })
| hb_any
;
@ -48,7 +49,7 @@ struct LigatureSubstFormat1
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const LigatureSet &_) { _.closure (c); })
| hb_apply ([c] (const LigatureSet<Types> &_) { _.closure (c); })
;
}
@ -62,7 +63,7 @@ struct LigatureSubstFormat1
+ hb_zip (this+coverage, ligatureSet)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const LigatureSet &_) { _.collect_glyphs (c); })
| hb_apply ([c] (const LigatureSet<Types> &_) { _.collect_glyphs (c); })
;
}
@ -73,7 +74,7 @@ struct LigatureSubstFormat1
unsigned int index = (this+coverage).get_coverage (c->glyphs[0]);
if (likely (index == NOT_COVERED)) return false;
const LigatureSet &lig_set = this+ligatureSet[index];
const auto &lig_set = this+ligatureSet[index];
return lig_set.would_apply (c);
}
@ -84,7 +85,7 @@ struct LigatureSubstFormat1
unsigned int index = (this+coverage).get_coverage (c->buffer->cur ().codepoint);
if (likely (index == NOT_COVERED)) return_trace (false);
const LigatureSet &lig_set = this+ligatureSet[index];
const auto &lig_set = this+ligatureSet[index];
return_trace (lig_set.apply (c));
}
@ -128,7 +129,7 @@ struct LigatureSubstFormat1
hb_set_t new_coverage;
+ hb_zip (this+coverage, hb_iter (ligatureSet) | hb_map (hb_add (this)))
| hb_filter (glyphset, hb_first)
| hb_filter ([&] (const LigatureSet& _) {
| hb_filter ([&] (const LigatureSet<Types>& _) {
return _.intersects (&glyphset);
}, hb_second)
| hb_map (hb_first)

View File

@ -6,14 +6,17 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct MultipleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
MultipleSubstFormat1 format1;
HBUINT16 format; /* Format identifier */
MultipleSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MultipleSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
@ -21,28 +24,34 @@ struct MultipleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const HBGlyphID16> substitute_glyphs_list)
Iterator it)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c, glyphs, substitute_len_list, substitute_glyphs_list));
case 1: return_trace (u.format1.serialize (c, it));
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};

View File

@ -6,20 +6,21 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct MultipleSubstFormat1
template <typename Types>
struct MultipleSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16OfOffset16To<Sequence>
Array16Of<typename Types::template OffsetTo<Sequence<Types>>>
sequence; /* Array of Sequence tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (6, sequence);
DEFINE_SIZE_ARRAY (4 + Types::size, sequence);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -39,7 +40,7 @@ struct MultipleSubstFormat1
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const Sequence &_) { _.closure (c); })
| hb_apply ([c] (const Sequence<Types> &_) { _.closure (c); })
;
}
@ -51,7 +52,7 @@ struct MultipleSubstFormat1
+ hb_zip (this+coverage, sequence)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const Sequence &_) { _.collect_glyphs (c); })
| hb_apply ([c] (const Sequence<Types> &_) { _.collect_glyphs (c); })
;
}
@ -70,22 +71,31 @@ struct MultipleSubstFormat1
return_trace ((this+sequence[index]).apply (c));
}
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const HBGlyphID16> substitute_glyphs_list)
Iterator it)
{
TRACE_SERIALIZE (this);
auto sequences =
+ it
| hb_map (hb_second)
;
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!sequence.serialize (c, glyphs.length))) return_trace (false);
for (unsigned int i = 0; i < glyphs.length; i++)
if (unlikely (!sequence.serialize (c, sequences.length))) return_trace (false);
for (auto& pair : hb_zip (sequences, sequence))
{
unsigned int substitute_len = substitute_len_list[i];
if (unlikely (!sequence[i]
.serialize_serialize (c, substitute_glyphs_list.sub_array (0, substitute_len))))
if (unlikely (!pair.second
.serialize_serialize (c, pair.first)))
return_trace (false);
substitute_glyphs_list += substitute_len;
}
return_trace (coverage.serialize_serialize (c, glyphs));
}

View File

@ -6,7 +6,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct ReverseChainSingleSubst
{
@ -20,8 +20,8 @@ struct ReverseChainSingleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());

View File

@ -5,7 +5,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct ReverseChainSingleSubstFormat1
{
@ -33,10 +33,10 @@ struct ReverseChainSingleSubstFormat1
TRACE_SANITIZE (this);
if (!(coverage.sanitize (c, this) && backtrack.sanitize (c, this)))
return_trace (false);
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
if (!lookahead.sanitize (c, this))
return_trace (false);
const Array16Of<HBGlyphID16> &substitute = StructAfter<Array16Of<HBGlyphID16>> (lookahead);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
return_trace (substitute.sanitize (c));
}
@ -45,7 +45,7 @@ struct ReverseChainSingleSubstFormat1
if (!(this+coverage).intersects (glyphs))
return false;
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
unsigned int count;
@ -69,8 +69,8 @@ struct ReverseChainSingleSubstFormat1
{
if (!intersects (c->glyphs)) return;
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const Array16Of<HBGlyphID16> &substitute = StructAfter<Array16Of<HBGlyphID16>> (lookahead);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
+ hb_zip (this+coverage, substitute)
| hb_filter (c->parent_active_glyphs (), hb_first)
@ -91,12 +91,12 @@ struct ReverseChainSingleSubstFormat1
for (unsigned int i = 0; i < count; i++)
if (unlikely (!(this+backtrack[i]).collect_coverage (c->before))) return;
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
count = lookahead.len;
for (unsigned int i = 0; i < count; i++)
if (unlikely (!(this+lookahead[i]).collect_coverage (c->after))) return;
const Array16Of<HBGlyphID16> &substitute = StructAfter<Array16Of<HBGlyphID16>> (lookahead);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
count = substitute.len;
c->output->add_array (substitute.arrayZ, substitute.len);
}
@ -115,8 +115,8 @@ struct ReverseChainSingleSubstFormat1
unsigned int index = (this+coverage).get_coverage (c->buffer->cur ().codepoint);
if (likely (index == NOT_COVERED)) return_trace (false);
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const Array16Of<HBGlyphID16> &substitute = StructAfter<Array16Of<HBGlyphID16>> (lookahead);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
if (unlikely (index >= substitute.len)) return_trace (false);
@ -131,7 +131,23 @@ struct ReverseChainSingleSubstFormat1
c->buffer->idx + 1, &end_index))
{
c->buffer->unsafe_to_break_from_outbuffer (start_index, end_index);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replacing glyph at %u (reverse chaining substitution)",
c->buffer->idx);
}
c->replace_glyph_inplace (substitute[index]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (reverse chaining substitution)",
c->buffer->idx);
}
/* Note: We DON'T decrease buffer->idx. The main loop does it
* for us. This is useful for preventing surprises if someone
* calls us through a Context lookup. */
@ -206,8 +222,8 @@ struct ReverseChainSingleSubstFormat1
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (backtrack);
const Array16Of<HBGlyphID16> &substitute = StructAfter<Array16Of<HBGlyphID16>> (lookahead);
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
auto it =
+ hb_zip (this+coverage, substitute)

View File

@ -5,12 +5,13 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
template <typename Types>
struct Sequence
{
protected:
Array16Of<HBGlyphID16>
Array16Of<typename Types::HBGlyphID>
substitute; /* String of GlyphIDs to substitute */
public:
DEFINE_SIZE_ARRAY (2, substitute);
@ -39,17 +40,58 @@ struct Sequence
* as a "multiplied" substitution. */
if (unlikely (count == 1))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (multiple substitution)",
c->buffer->idx);
}
c->replace_glyph (substitute.arrayZ[0]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (multiple subtitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
/* Spec disallows this, but Uniscribe allows it.
* https://github.com/harfbuzz/harfbuzz/issues/253 */
else if (unlikely (count == 0))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"deleting glyph at %u (multiple substitution)",
c->buffer->idx);
}
c->buffer->delete_glyph ();
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"deleted glyph at %u (multiple substitution)",
c->buffer->idx);
}
return_trace (true);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"multiplying glyph at %u",
c->buffer->idx);
}
unsigned int klass = _hb_glyph_info_is_ligature (&c->buffer->cur()) ?
HB_OT_LAYOUT_GLYPH_PROPS_BASE_GLYPH : 0;
unsigned lig_id = _hb_glyph_info_get_lig_id (&c->buffer->cur());
@ -64,6 +106,26 @@ struct Sequence
}
c->buffer->skip_glyph ();
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
char buf[HB_MAX_CONTEXT_LENGTH * 16] = {0};
char *p = buf;
for (unsigned i = c->buffer->idx - count; i < c->buffer->idx; i++)
{
if (buf < p)
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", i);
p += strlen(p);
}
c->buffer->message (c->font,
"multiplied glyphs at %s",
buf);
}
return_trace (true);
}

View File

@ -7,15 +7,19 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct SingleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
SingleSubstFormat1 format1;
SingleSubstFormat2 format2;
HBUINT16 format; /* Format identifier */
SingleSubstFormat1_3<SmallTypes> format1;
SingleSubstFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
SingleSubstFormat1_3<MediumTypes> format3;
SingleSubstFormat2_4<MediumTypes> format4;
#endif
} u;
public:
@ -23,11 +27,15 @@ struct SingleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
case 4: return_trace (c->dispatch (u.format4, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
@ -45,11 +53,24 @@ struct SingleSubst
if (glyphs)
{
format = 1;
hb_codepoint_t mask = 0xFFFFu;
#ifndef HB_NO_BEYOND_64K
if (+ glyphs
| hb_map_retains_sorting (hb_first)
| hb_filter ([] (hb_codepoint_t gid) { return gid > 0xFFFFu; }))
{
format += 2;
mask = 0xFFFFFFu;
}
#endif
auto get_delta = [=] (hb_codepoint_pair_t _)
{ return (unsigned) (_.second - _.first) & 0xFFFF; };
{ return (unsigned) (_.second - _.first) & mask; };
delta = get_delta (*glyphs);
if (!hb_all (++(+glyphs), delta, get_delta)) format = 2;
if (!hb_all (++(+glyphs), delta, get_delta)) format += 1;
}
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c,
@ -57,6 +78,13 @@ struct SingleSubst
| hb_map_retains_sorting (hb_first),
delta));
case 2: return_trace (u.format2.serialize (c, glyphs));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.serialize (c,
+ glyphs
| hb_map_retains_sorting (hb_first),
delta));
case 4: return_trace (u.format4.serialize (c, glyphs));
#endif
default:return_trace (false);
}
}
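
`serialize ()` now chooses among four formats: it adds 2 (and widens the delta mask to 24 bits) when a glyph ID exceeds 0xFFFF, and adds 1 when the per-pair deltas are not all equal. A standalone sketch of that decision with illustrative glyph pairs (for brevity it tests both sides of each pair against 0xFFFF):

```
#include <cstdio>
#include <utility>
#include <vector>

// Pick a SingleSubst format the way serialize () does (sketch):
// +2 when a gid needs 24 bits, +1 when the deltas are not uniform.
static unsigned
choose_format (const std::vector<std::pair<unsigned, unsigned>> &glyphs)
{
  unsigned format = 1, mask = 0xFFFFu;
  for (const auto &p : glyphs)
    if (p.first > 0xFFFFu || p.second > 0xFFFFu)
    { format += 2; mask = 0xFFFFFFu; break; }

  unsigned delta = (glyphs[0].second - glyphs[0].first) & mask;
  for (const auto &p : glyphs)
    if (((p.second - p.first) & mask) != delta) { format += 1; break; }

  return format;
}

int main ()
{
  printf ("%u\n", choose_format ({{10, 60}, {11, 61}})); // 1: uniform delta 50
  printf ("%u\n", choose_format ({{10, 60}, {11, 99}})); // 2: deltas 50 and 88 differ
  return 0;
}
```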

View File

@ -5,20 +5,22 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct SingleSubstFormat1
template <typename Types>
struct SingleSubstFormat1_3
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
HBUINT16 deltaGlyphID; /* Add to original GlyphID to get
typename Types::HBUINT
deltaGlyphID; /* Add to original GlyphID to get
* substitute GlyphID, modulo 0x10000 */
public:
DEFINE_SIZE_STATIC (6);
DEFINE_SIZE_STATIC (2 + 2 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -26,6 +28,9 @@ struct SingleSubstFormat1
return_trace (coverage.sanitize (c, this) && deltaGlyphID.sanitize (c));
}
hb_codepoint_t get_mask () const
{ return (1 << (8 * Types::size)) - 1; }
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
@ -34,14 +39,33 @@ struct SingleSubstFormat1
void closure (hb_closure_context_t *c) const
{
unsigned d = deltaGlyphID;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
+ hb_iter (this+coverage)
| hb_filter (c->parent_active_glyphs ())
| hb_map ([d] (hb_codepoint_t g) { return (g + d) & 0xFFFFu; })
/* Help the fuzzer avoid this function as much as possible. */
unsigned pop = (this+coverage).get_population ();
if (pop >= mask)
return;
hb_set_t intersection;
(this+coverage).intersect_set (c->parent_active_glyphs (), intersection);
/* In degenerate fuzzer-found fonts, but not real fonts,
* this table can keep adding new glyphs in each round of closure.
* Refuse to close over if it maps a glyph range onto an overlapping range. */
hb_codepoint_t min_before = intersection.get_min ();
hb_codepoint_t max_before = intersection.get_max ();
hb_codepoint_t min_after = (min_before + d) & mask;
hb_codepoint_t max_after = (max_before + d) & mask;
if (intersection.get_population () == max_before - min_before + 1 &&
((min_before <= min_after && min_after <= max_before) ||
(min_before <= max_after && max_after <= max_before)))
return;
+ hb_iter (intersection)
| hb_map ([d, mask] (hb_codepoint_t g) { return (g + d) & mask; })
| hb_sink (c->output)
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
@ -49,9 +73,11 @@ struct SingleSubstFormat1
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
unsigned d = deltaGlyphID;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
+ hb_iter (this+coverage)
| hb_map ([d] (hb_codepoint_t g) { return (g + d) & 0xFFFFu; })
| hb_map ([d, mask] (hb_codepoint_t g) { return (g + d) & mask; })
| hb_sink (c->output)
;
}
@ -68,11 +94,28 @@ struct SingleSubstFormat1
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (likely (index == NOT_COVERED)) return_trace (false);
/* According to the Adobe Annotated OpenType Suite, result is always
* limited to 16bit. */
glyph_id = (glyph_id + deltaGlyphID) & 0xFFFFu;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
glyph_id = (glyph_id + d) & mask;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (single substitution)",
c->buffer->idx);
}
c->replace_glyph (glyph_id);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (single substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
@ -95,14 +138,17 @@ struct SingleSubstFormat1
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_codepoint_t delta = deltaGlyphID;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
hb_set_t intersection;
(this+coverage).intersect_set (glyphset, intersection);
auto it =
+ hb_iter (this+coverage)
| hb_filter (glyphset)
| hb_map_retains_sorting ([&] (hb_codepoint_t g) {
+ hb_iter (intersection)
| hb_map_retains_sorting ([d, mask] (hb_codepoint_t g) {
return hb_codepoint_pair_t (g,
(g + delta) & 0xFFFF); })
(g + d) & mask); })
| hb_filter (glyphset, hb_second)
| hb_map_retains_sorting ([&] (hb_codepoint_pair_t p) -> hb_codepoint_pair_t
{ return hb_pair (glyph_map[p.first], glyph_map[p.second]); })

View File

@ -5,21 +5,22 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct SingleSubstFormat2
template <typename Types>
struct SingleSubstFormat2_4
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
Offset16To<Coverage>
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16Of<HBGlyphID16>
Array16Of<typename Types::HBGlyphID>
substitute; /* Array of substitute
* GlyphIDs--ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (6, substitute);
DEFINE_SIZE_ARRAY (4 + Types::size, substitute);
bool sanitize (hb_sanitize_context_t *c) const
{
@ -35,12 +36,27 @@ struct SingleSubstFormat2
void closure (hb_closure_context_t *c) const
{
+ hb_zip (this+coverage, substitute)
| hb_filter (c->parent_active_glyphs (), hb_first)
auto &cov = this+coverage;
auto &glyph_set = c->parent_active_glyphs ();
if (substitute.len > glyph_set.get_population () * 4)
{
for (auto g : glyph_set)
{
unsigned i = cov.get_coverage (g);
if (i == NOT_COVERED || i >= substitute.len)
continue;
c->output->add (substitute.arrayZ[i]);
}
return;
}
+ hb_zip (cov, substitute)
| hb_filter (glyph_set, hb_first)
| hb_map (hb_second)
| hb_sink (c->output)
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
@ -67,8 +83,23 @@ struct SingleSubstFormat2
if (unlikely (index >= substitute.len)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (single substitution)",
c->buffer->idx);
}
c->replace_glyph (substitute[index]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (single substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
@ -103,7 +134,7 @@ struct SingleSubstFormat2
+ hb_zip (this+coverage, substitute)
| hb_filter (glyphset, hb_first)
| hb_filter (glyphset, hb_second)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const HBGlyphID16 &> p) -> hb_codepoint_pair_t
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const typename Types::HBGlyphID &> p) -> hb_codepoint_pair_t
{ return hb_pair (glyph_map[p.first], glyph_map[p.second]); })
;

View File

@ -6,7 +6,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct SubstLookup : Lookup
{
@ -25,7 +25,7 @@ struct SubstLookup : Lookup
{
unsigned int type = get_type ();
if (unlikely (type == SubTable::Extension))
return reinterpret_cast<const ExtensionSubst &> (get_subtable (0)).is_reverse ();
return get_subtable (0).u.extension.is_reverse ();
return lookup_type_is_reverse (type);
}
@ -98,10 +98,15 @@ struct SubstLookup : Lookup
return dispatch (c);
}
template<typename Glyphs, typename Substitutes,
hb_requires (hb_is_sorted_source_of (Glyphs,
const hb_codepoint_t) &&
hb_is_source_of (Substitutes,
const hb_codepoint_t))>
bool serialize_single (hb_serialize_context_t *c,
uint32_t lookup_props,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const HBGlyphID16> substitutes)
Glyphs glyphs,
Substitutes substitutes)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Single, lookup_props, 1))) return_trace (false);
@ -114,19 +119,16 @@ struct SubstLookup : Lookup
return_trace (false);
}
bool serialize_multiple (hb_serialize_context_t *c,
uint32_t lookup_props,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const HBGlyphID16> substitute_glyphs_list)
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
uint32_t lookup_props,
Iterator it)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Multiple, lookup_props, 1))) return_trace (false);
if (c->push<SubTable> ()->u.multiple.
serialize (c,
glyphs,
substitute_len_list,
substitute_glyphs_list))
serialize (c, it))
{
c->add_link (get_subtables<SubTable> ()[0], c->pop_pack ());
return_trace (true);

View File

@ -13,7 +13,7 @@
namespace OT {
namespace Layout {
namespace GSUB {
namespace GSUB_impl {
struct SubstLookupSubTable
{

View File

@ -0,0 +1,66 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_TYPES_HH
#define OT_LAYOUT_TYPES_HH
namespace OT {
namespace Layout {
struct SmallTypes {
static constexpr unsigned size = 2;
using large_int = uint32_t;
using HBUINT = HBUINT16;
using HBGlyphID = HBGlyphID16;
using Offset = Offset16;
template <typename Type, bool has_null=true>
using OffsetTo = OT::Offset16To<Type, has_null>;
template <typename Type>
using ArrayOf = OT::Array16Of<Type>;
template <typename Type>
using SortedArrayOf = OT::SortedArray16Of<Type>;
};
struct MediumTypes {
static constexpr unsigned size = 3;
using large_int = uint64_t;
using HBUINT = HBUINT24;
using HBGlyphID = HBGlyphID24;
using Offset = Offset24;
template <typename Type, bool has_null=true>
using OffsetTo = OT::Offset24To<Type, has_null>;
template <typename Type>
using ArrayOf = OT::Array24Of<Type>;
template <typename Type>
using SortedArrayOf = OT::SortedArray24Of<Type>;
};
}
}
#endif /* OT_LAYOUT_TYPES_HH */
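
`SmallTypes`/`MediumTypes` let a single template body serve both the classic 16-bit layout and the new 24-bit layout gated by `HB_NO_BEYOND_64K`, which is why the `DEFINE_SIZE_*` expressions throughout this patch are written in terms of `Types::size`. A compile-time sketch of the resulting header sizes, mirroring the `DEFINE_SIZE_STATIC (2 + 2 * Types::size)` of `SingleSubstFormat1_3`:

```
struct SmallTypes  { static constexpr unsigned size = 2; }; // 16-bit gids/offsets
struct MediumTypes { static constexpr unsigned size = 3; }; // 24-bit gids/offsets

// SingleSubstFormat1_3: format (2 bytes) + OffsetTo<Coverage> + deltaGlyphID.
template <typename Types>
constexpr unsigned single_subst_format1_size () { return 2 + 2 * Types::size; }

static_assert (single_subst_format1_size<SmallTypes> ()  == 6, "classic layout");
static_assert (single_subst_format1_size<MediumTypes> () == 8, "beyond-64k layout");

int main () { return 0; }
```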

View File

@ -3,6 +3,7 @@
#include "../../hb-open-type.hh"
#include "composite-iter.hh"
namespace OT {
@ -25,13 +26,20 @@ struct CompositeGlyphRecord
USE_MY_METRICS = 0x0200,
OVERLAP_COMPOUND = 0x0400,
SCALED_COMPONENT_OFFSET = 0x0800,
UNSCALED_COMPONENT_OFFSET = 0x1000
UNSCALED_COMPONENT_OFFSET = 0x1000,
#ifndef HB_NO_BEYOND_64K
GID_IS_24BIT = 0x2000
#endif
};
public:
unsigned int get_size () const
{
unsigned int size = min_size;
/* glyphIndex is 24bit instead of 16bit */
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT) size += HBGlyphID24::static_size - HBGlyphID16::static_size;
#endif
/* arg1 and 2 are int16 */
if (flags & ARG_1_AND_2_ARE_WORDS) size += 4;
/* arg1 and 2 are int8 */
@ -60,7 +68,13 @@ struct CompositeGlyphRecord
bool is_anchored () const { return !(flags & ARGS_ARE_XY_VALUES); }
void get_anchor_points (unsigned int &point1, unsigned int &point2) const
{
const HBUINT8 *p = &StructAfter<const HBUINT8> (glyphIndex);
const auto *p = &StructAfter<const HBUINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
if (flags & ARG_1_AND_2_ARE_WORDS)
{
point1 = ((const HBUINT16 *) p)[0];
@ -92,6 +106,67 @@ struct CompositeGlyphRecord
}
}
unsigned compile_with_deltas (const contour_point_t &p_delta,
char *out) const
{
const HBINT8 *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
unsigned len = get_size ();
unsigned len_before_val = (const char *)p - (const char *)this;
if (flags & ARG_1_AND_2_ARE_WORDS)
{
// no overflow, copy and update value with deltas
hb_memcpy (out, this, len);
const HBINT16 *px = reinterpret_cast<const HBINT16 *> (p);
HBINT16 *o = reinterpret_cast<HBINT16 *> (out + len_before_val);
o[0] = px[0] + roundf (p_delta.x);
o[1] = px[1] + roundf (p_delta.y);
}
else
{
int new_x = p[0] + roundf (p_delta.x);
int new_y = p[1] + roundf (p_delta.y);
if (new_x <= 127 && new_x >= -128 &&
new_y <= 127 && new_y >= -128)
{
hb_memcpy (out, this, len);
HBINT8 *o = reinterpret_cast<HBINT8 *> (out + len_before_val);
o[0] = new_x;
o[1] = new_y;
}
else
{
// int8 overflows after deltas applied
hb_memcpy (out, this, len_before_val);
// update flags
CompositeGlyphRecord *o = reinterpret_cast<CompositeGlyphRecord *> (out);
o->flags = flags | ARG_1_AND_2_ARE_WORDS;
out += len_before_val;
HBINT16 new_value;
new_value = new_x;
hb_memcpy (out, &new_value, HBINT16::static_size);
out += HBINT16::static_size;
new_value = new_y;
hb_memcpy (out, &new_value, HBINT16::static_size);
out += HBINT16::static_size;
hb_memcpy (out, p+2, len - len_before_val - 2);
len += 2;
}
}
return len;
}
protected:
bool scaled_offsets () const
{ return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; }
@ -101,8 +176,14 @@ struct CompositeGlyphRecord
matrix[0] = matrix[3] = 1.f;
matrix[1] = matrix[2] = 0.f;
const auto *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
int tx, ty;
const HBINT8 *p = &StructAfter<const HBINT8> (glyphIndex);
if (flags & ARG_1_AND_2_ARE_WORDS)
{
tx = *(const HBINT16 *) p;
@ -145,61 +226,34 @@ struct CompositeGlyphRecord
}
public:
hb_codepoint_t get_gid () const
{
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
return StructAfter<const HBGlyphID24> (flags);
else
#endif
return StructAfter<const HBGlyphID16> (flags);
}
void set_gid (hb_codepoint_t gid)
{
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
StructAfter<HBGlyphID24> (flags) = gid;
else
#endif
/* TODO assert? */
StructAfter<HBGlyphID16> (flags) = gid;
}
protected:
HBUINT16 flags;
HBGlyphID16 glyphIndex;
HBUINT24 pad;
public:
DEFINE_SIZE_MIN (4);
};
struct composite_iter_t : hb_iter_with_fallback_t<composite_iter_t, const CompositeGlyphRecord &>
{
typedef const CompositeGlyphRecord *__item_t__;
composite_iter_t (hb_bytes_t glyph_, __item_t__ current_) :
glyph (glyph_), current (nullptr), current_size (0)
{
set_current (current_);
}
composite_iter_t () : glyph (hb_bytes_t ()), current (nullptr), current_size (0) {}
item_t __item__ () const { return *current; }
bool __more__ () const { return current; }
void __next__ ()
{
if (!current->has_more ()) { current = nullptr; return; }
set_current (&StructAtOffset<CompositeGlyphRecord> (current, current_size));
}
composite_iter_t __end__ () const { return composite_iter_t (); }
bool operator != (const composite_iter_t& o) const
{ return current != o.current; }
void set_current (__item_t__ current_)
{
if (!glyph.check_range (current_, CompositeGlyphRecord::min_size))
{
current = nullptr;
current_size = 0;
return;
}
unsigned size = current_->get_size ();
if (!glyph.check_range (current_, size))
{
current = nullptr;
current_size = 0;
return;
}
current = current_;
current_size = size;
}
private:
hb_bytes_t glyph;
__item_t__ current;
unsigned current_size;
};
using composite_iter_t = composite_iter_tmpl<CompositeGlyphRecord>;
struct CompositeGlyph
{
@ -248,6 +302,63 @@ struct CompositeGlyph
return;
glyph_chain.set_overlaps_flag ();
}
bool compile_bytes_with_deltas (const hb_bytes_t &source_bytes,
const contour_point_vector_t &deltas,
hb_bytes_t &dest_bytes /* OUT */)
{
if (source_bytes.length <= GlyphHeader::static_size ||
header.numberOfContours != -1)
{
dest_bytes = hb_bytes_t ();
return true;
}
unsigned source_len = source_bytes.length - GlyphHeader::static_size;
/* try to allocate more memory than the source glyph bytes need,
* in case an int8 value overflows after the deltas are applied
* and has to be widened to int16 */
char *o = (char *) hb_calloc (source_len + source_len/2, sizeof (char));
if (unlikely (!o)) return false;
const CompositeGlyphRecord *c = reinterpret_cast<const CompositeGlyphRecord *> (source_bytes.arrayZ + GlyphHeader::static_size);
auto it = composite_iter_t (hb_bytes_t ((const char *)c, source_len), c);
char *p = o;
unsigned i = 0, source_comp_len = 0;
for (const auto &component : it)
{
/* last 4 points in deltas are phantom points and should not be included */
if (i >= deltas.length - 4) return false;
unsigned comp_len = component.get_size ();
if (component.is_anchored ())
{
hb_memcpy (p, &component, comp_len);
p += comp_len;
}
else
{
unsigned new_len = component.compile_with_deltas (deltas[i], p);
p += new_len;
}
i++;
source_comp_len += comp_len;
}
// copy instructions, if any
if (source_len > source_comp_len)
{
unsigned instr_len = source_len - source_comp_len;
hb_memcpy (p, (const char *)c + source_comp_len, instr_len);
p += instr_len;
}
unsigned len = p - o;
dest_bytes = hb_bytes_t (o, len);
return true;
}
};
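
`compile_with_deltas` keeps a component record byte-compatible whenever it can: if the shifted offsets still fit in int8 it rewrites them in place, otherwise it sets `ARG_1_AND_2_ARE_WORDS` and widens both arguments to int16, growing the record by two bytes — which is why `compile_bytes_with_deltas` above over-allocates by `source_len/2`. A worked sketch of the promotion test with illustrative values:

```
#include <cmath>
#include <cstdio>

// Sketch of the int8 -> int16 promotion decision in compile_with_deltas.
static bool fits_int8 (int arg, float delta)
{
  int shifted = arg + (int) roundf (delta);
  return shifted >= -128 && shifted <= 127;
}

int main ()
{
  printf ("%d\n", fits_int8 (100, 20.f)); // 120: stays int8, record unchanged
  printf ("%d\n", fits_int8 (100, 40.f)); // 140: overflow, widen to words, +2 bytes
  return 0;
}
```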

View File

@ -7,6 +7,8 @@
#include "GlyphHeader.hh"
#include "SimpleGlyph.hh"
#include "CompositeGlyph.hh"
#include "VarCompositeGlyph.hh"
#include "coord-setter.hh"
namespace OT {
@ -27,7 +29,7 @@ enum phantom_point_index_t
struct Glyph
{
enum glyph_type_t { EMPTY, SIMPLE, COMPOSITE };
enum glyph_type_t { EMPTY, SIMPLE, COMPOSITE, VAR_COMPOSITE };
public:
composite_iter_t get_composite_iterator () const
@ -35,6 +37,11 @@ struct Glyph
if (type != COMPOSITE) return composite_iter_t ();
return CompositeGlyph (*header, bytes).iter ();
}
var_composite_iter_t get_var_composite_iterator () const
{
if (type != VAR_COMPOSITE) return var_composite_iter_t ();
return VarCompositeGlyph (*header, bytes).iter ();
}
const hb_bytes_t trim_padding () const
{
@ -72,22 +79,212 @@ struct Glyph
}
}
void update_mtx (const hb_subset_plan_t *plan,
int xMin, int xMax,
int yMin, int yMax,
const contour_point_vector_t &all_points) const
{
hb_codepoint_t new_gid = 0;
if (!plan->new_gid_for_old_gid (gid, &new_gid))
return;
if (type != EMPTY)
{
plan->bounds_width_map.set (new_gid, xMax - xMin);
plan->bounds_height_map.set (new_gid, yMax - yMin);
}
unsigned len = all_points.length;
float leftSideX = all_points[len - 4].x;
float rightSideX = all_points[len - 3].x;
float topSideY = all_points[len - 2].y;
float bottomSideY = all_points[len - 1].y;
signed hori_aw = roundf (rightSideX - leftSideX);
if (hori_aw < 0) hori_aw = 0;
int lsb = roundf (xMin - leftSideX);
plan->hmtx_map.set (new_gid, hb_pair ((unsigned) hori_aw, lsb));
// flag value should be computed using non-empty glyphs
if (type != EMPTY && lsb != xMin)
plan->head_maxp_info.allXMinIsLsb = false;
signed vert_aw = roundf (topSideY - bottomSideY);
if (vert_aw < 0) vert_aw = 0;
int tsb = roundf (topSideY - yMax);
plan->vmtx_map.set (new_gid, hb_pair ((unsigned) vert_aw, tsb));
}
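
`update_mtx` derives the subset metrics from the four phantom points appended at the end of `all_points` (left, right, top, bottom, in that order). A small sketch of the horizontal arithmetic with illustrative coordinates:

```
#include <cassert>
#include <cmath>

int main ()
{
  // Illustrative phantom-point values, not taken from a real font.
  float leftSideX = 0.f, rightSideX = 600.f, xMin = 50.f;

  int hori_aw = (int) roundf (rightSideX - leftSideX); // advance width: 600
  if (hori_aw < 0) hori_aw = 0;                        // clamp, as update_mtx does
  int lsb = (int) roundf (xMin - leftSideX);           // left side bearing: 50

  assert (hori_aw == 600 && lsb == 50);                // hmtx_map gets (600, 50)
  return 0;
}
```
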
bool compile_header_bytes (const hb_subset_plan_t *plan,
const contour_point_vector_t &all_points,
hb_bytes_t &dest_bytes /* OUT */) const
{
GlyphHeader *glyph_header = nullptr;
if (!plan->pinned_at_default && type != EMPTY && all_points.length >= 4)
{
glyph_header = (GlyphHeader *) hb_calloc (1, GlyphHeader::static_size);
if (unlikely (!glyph_header)) return false;
}
float xMin = 0, xMax = 0;
float yMin = 0, yMax = 0;
if (all_points.length > 4)
{
xMin = xMax = all_points[0].x;
yMin = yMax = all_points[0].y;
}
for (unsigned i = 1; i < all_points.length - 4; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
update_mtx (plan, roundf (xMin), roundf (xMax), roundf (yMin), roundf (yMax), all_points);
int rounded_xMin = roundf (xMin);
int rounded_xMax = roundf (xMax);
int rounded_yMin = roundf (yMin);
int rounded_yMax = roundf (yMax);
if (type != EMPTY)
{
plan->head_maxp_info.xMin = hb_min (plan->head_maxp_info.xMin, rounded_xMin);
plan->head_maxp_info.yMin = hb_min (plan->head_maxp_info.yMin, rounded_yMin);
plan->head_maxp_info.xMax = hb_max (plan->head_maxp_info.xMax, rounded_xMax);
plan->head_maxp_info.yMax = hb_max (plan->head_maxp_info.yMax, rounded_yMax);
}
/* when pinned at default there is no need to compile the glyph header;
* for empty glyphs, all_points only includes phantom points:
* just update the metrics and return */
if (!glyph_header)
return true;
glyph_header->numberOfContours = header->numberOfContours;
glyph_header->xMin = rounded_xMin;
glyph_header->yMin = rounded_yMin;
glyph_header->xMax = rounded_xMax;
glyph_header->yMax = rounded_yMax;
dest_bytes = hb_bytes_t ((const char *)glyph_header, GlyphHeader::static_size);
return true;
}
bool compile_bytes_with_deltas (const hb_subset_plan_t *plan,
hb_font_t *font,
const glyf_accelerator_t &glyf,
hb_bytes_t &dest_start, /* IN/OUT */
hb_bytes_t &dest_end /* OUT */)
{
contour_point_vector_t all_points, deltas;
unsigned composite_contours = 0;
head_maxp_info_t *head_maxp_info_p = &plan->head_maxp_info;
unsigned *composite_contours_p = &composite_contours;
// don't compute head/maxp values when the glyph has no contours (type is EMPTY);
// also ignore the .notdef glyph when --notdef-outline is not enabled
if (type == EMPTY ||
(gid == 0 && !(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)))
{
head_maxp_info_p = nullptr;
composite_contours_p = nullptr;
}
if (!get_points (font, glyf, all_points, &deltas, head_maxp_info_p, composite_contours_p, false, false))
return false;
// For .notdef, set type to EMPTY so we only update metrics and don't compile bytes
// for it.
if (gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
{
type = EMPTY;
dest_start = hb_bytes_t ();
dest_end = hb_bytes_t ();
}
// don't compile bytes when pinned at default, just recalculate bounds
if (!plan->pinned_at_default) {
switch (type) {
case COMPOSITE:
if (!CompositeGlyph (*header, bytes).compile_bytes_with_deltas (dest_start,
deltas,
dest_end))
return false;
break;
case SIMPLE:
if (!SimpleGlyph (*header, bytes).compile_bytes_with_deltas (all_points,
plan->flags & HB_SUBSET_FLAGS_NO_HINTING,
dest_end))
return false;
break;
default:
/* set empty bytes for empty glyph
* do not use source glyph's pointers */
dest_start = hb_bytes_t ();
dest_end = hb_bytes_t ();
break;
}
}
if (!compile_header_bytes (plan, all_points, dest_start))
{
dest_end.fini ();
return false;
}
return true;
}
/* Note: Recursively calls itself.
* all_points includes phantom points
*/
template <typename accelerator_t>
bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator,
contour_point_vector_t &all_points /* OUT */,
contour_point_vector_t *deltas = nullptr, /* OUT */
head_maxp_info_t * head_maxp_info = nullptr, /* OUT */
unsigned *composite_contours = nullptr, /* OUT */
bool shift_points_hori = true,
bool use_my_metrics = true,
bool phantom_only = false,
unsigned int depth = 0) const
hb_array_t<int> coords = hb_array_t<int> (),
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
if (unlikely (depth > HB_MAX_NESTING_LEVEL)) return false;
unsigned stack_edge_count = 0;
if (!edge_count) edge_count = &stack_edge_count;
if (unlikely (*edge_count > HB_GLYF_MAX_EDGE_COUNT)) return false;
(*edge_count)++;
if (head_maxp_info)
{
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
}
if (!coords)
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t stack_points;
bool inplace = type == SIMPLE && all_points.length == 0;
/* Load into all_points if it's empty, as an optimization. */
contour_point_vector_t &points = inplace ? all_points : stack_points;
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxContours = hb_max (head_maxp_info->maxContours, (unsigned) header->numberOfContours);
if (depth > 0 && composite_contours)
*composite_contours += (unsigned) header->numberOfContours;
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only)))
return false;
break;
case COMPOSITE:
{
/* pseudo component points for each component in composite glyph */
@ -95,29 +292,36 @@ struct Glyph
if (unlikely (!points.resize (num_points))) return false;
break;
}
case SIMPLE:
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only)))
return false;
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
for (auto &item : get_var_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
}
#endif
default:
break;
}
/* Init phantom points */
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
int h_delta = (int) header->xMin -
glyf_accelerator.hmtx->get_side_bearing (gid);
int lsb = 0;
int h_delta = glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb) ?
(int) header->xMin - lsb : 0;
HB_UNUSED int tsb = 0;
int v_orig = (int) header->yMax +
#ifndef HB_NO_VERTICAL
glyf_accelerator.vmtx->get_side_bearing (gid)
((void) glyf_accelerator.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb), tsb)
#else
0
#endif
;
unsigned h_adv = glyf_accelerator.hmtx->get_advance (gid);
unsigned h_adv = glyf_accelerator.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
glyf_accelerator.vmtx->get_advance (gid)
glyf_accelerator.vmtx->get_advance_without_var_unscaled (gid)
#else
- font->face->get_upem ()
#endif
@ -128,12 +332,33 @@ struct Glyph
phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
}
if (deltas != nullptr && depth == 0 && type == COMPOSITE)
{
if (unlikely (!deltas->resize (points.length))) return false;
deltas->copy_vector (points);
}
#ifndef HB_NO_VAR
glyf_accelerator.gvar->apply_deltas_to_points (gid, font, points.as_array ());
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ());
#endif
// Deltas are mainly used by CompositeGlyph to calculate new X/Y offset values,
// so there is no need to extend them with child glyphs' points.
if (deltas != nullptr && depth == 0 && type == COMPOSITE)
{
for (unsigned i = 0 ; i < points.length; i++)
{
deltas->arrayZ[i].x = points.arrayZ[i].x - deltas->arrayZ[i].x;
deltas->arrayZ[i].y = points.arrayZ[i].y - deltas->arrayZ[i].y;
}
}
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, points.length - 4);
if (!inplace)
all_points.extend (points.as_array ());
break;
@ -144,13 +369,23 @@ struct Glyph
for (auto &item : get_composite_iterator ())
{
comp_points.reset ();
if (unlikely (!glyf_accelerator.glyph_for_gid (item.glyphIndex)
.get_points (font, glyf_accelerator, comp_points,
phantom_only, depth + 1)))
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
.get_points (font,
glyf_accelerator,
comp_points,
deltas,
head_maxp_info,
composite_contours,
shift_points_hori,
use_my_metrics,
phantom_only,
coords,
depth + 1,
edge_count)))
return false;
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (item.is_use_my_metrics ())
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
@ -174,18 +409,80 @@ struct Glyph
}
}
all_points.extend (comp_points.sub_array (0, comp_points.length - PHANTOM_COUNT));
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
if (all_points.length > HB_GLYF_MAX_POINTS)
return false;
comp_index++;
}
if (head_maxp_info && depth == 0)
{
if (composite_contours)
head_maxp_info->maxCompositeContours = hb_max (head_maxp_info->maxCompositeContours, *composite_contours);
head_maxp_info->maxCompositePoints = hb_max (head_maxp_info->maxCompositePoints, all_points.length);
head_maxp_info->maxComponentElements = hb_max (head_maxp_info->maxComponentElements, comp_index);
}
all_points.extend (phantoms);
} break;
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
contour_point_vector_t comp_points;
hb_array_t<contour_point_t> points_left = points.as_array ();
for (auto &item : get_var_composite_iterator ())
{
hb_array_t<contour_point_t> record_points = points_left.sub_array (0, item.get_num_points ());
comp_points.reset ();
auto component_coords = coords;
if (item.is_reset_unspecified_axes ())
component_coords = hb_array<int> ();
coord_setter_t coord_setter (component_coords);
item.set_variations (coord_setter, record_points);
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
.get_points (font,
glyf_accelerator,
comp_points,
deltas,
head_maxp_info,
nullptr,
shift_points_hori,
use_my_metrics,
phantom_only,
coord_setter.get_coords (),
depth + 1,
edge_count)))
return false;
/* Apply component transformation */
item.transform_points (record_points, comp_points);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
if (all_points.length > HB_GLYF_MAX_POINTS)
return false;
points_left += item.get_num_points ();
}
all_points.extend (phantoms);
} break;
#endif
default:
all_points.extend (phantoms);
break;
}
if (depth == 0) /* Apply at top level */
if (depth == 0 && shift_points_hori) /* Apply at top level */
{
/* Undocumented rasterizer behavior:
* Shift points horizontally by the updated left side bearing
@ -198,23 +495,30 @@ struct Glyph
return !all_points.in_error ();
}
bool get_extents (hb_font_t *font, const glyf_accelerator_t &glyf_accelerator,
hb_glyph_extents_t *extents) const
bool get_extents_without_var_scaled (hb_font_t *font, const glyf_accelerator_t &glyf_accelerator,
hb_glyph_extents_t *extents) const
{
if (type == EMPTY) return true; /* Empty glyph; zero extents. */
return header->get_extents (font, glyf_accelerator, gid, extents);
return header->get_extents_without_var_scaled (font, glyf_accelerator, gid, extents);
}
hb_bytes_t get_bytes () const { return bytes; }
Glyph (hb_bytes_t bytes_ = hb_bytes_t (),
hb_codepoint_t gid_ = (hb_codepoint_t) -1) : bytes (bytes_),
header (bytes.as<GlyphHeader> ()),
gid (gid_)
Glyph () : bytes (),
header (bytes.as<GlyphHeader> ()),
gid (-1),
type(EMPTY)
{}
Glyph (hb_bytes_t bytes_,
hb_codepoint_t gid_ = (unsigned) -1) : bytes (bytes_),
header (bytes.as<GlyphHeader> ()),
gid (gid_)
{
int num_contours = header->numberOfContours;
if (unlikely (num_contours == 0)) type = EMPTY;
else if (num_contours > 0) type = SIMPLE;
else if (num_contours == -2) type = VAR_COMPOSITE;
else type = COMPOSITE; /* negative numbers */
}

View File

@ -14,15 +14,19 @@ struct GlyphHeader
bool has_data () const { return numberOfContours; }
template <typename accelerator_t>
bool get_extents (hb_font_t *font, const accelerator_t &glyf_accelerator,
hb_codepoint_t gid, hb_glyph_extents_t *extents) const
bool get_extents_without_var_scaled (hb_font_t *font, const accelerator_t &glyf_accelerator,
hb_codepoint_t gid, hb_glyph_extents_t *extents) const
{
/* Undocumented rasterizer behavior: shift glyph to the left by (lsb - xMin), i.e., xMin = lsb */
/* extents->x_bearing = hb_min (glyph_header.xMin, glyph_header.xMax); */
extents->x_bearing = font->em_scale_x (glyf_accelerator.hmtx->get_side_bearing (gid));
extents->y_bearing = font->em_scale_y (hb_max (yMin, yMax));
extents->width = font->em_scale_x (hb_max (xMin, xMax) - hb_min (xMin, xMax));
extents->height = font->em_scale_y (hb_min (yMin, yMax) - hb_max (yMin, yMax));
int lsb = hb_min (xMin, xMax);
(void) glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
extents->x_bearing = lsb;
extents->y_bearing = hb_max (yMin, yMax);
extents->width = hb_max (xMin, xMax) - hb_min (xMin, xMax);
extents->height = hb_min (yMin, yMax) - hb_max (yMin, yMax);
font->scale_glyph_extents (extents);
return true;
}

View File

@ -20,7 +20,7 @@ struct SimpleGlyph
FLAG_X_SAME = 0x10,
FLAG_Y_SAME = 0x20,
FLAG_OVERLAP_SIMPLE = 0x40,
FLAG_RESERVED2 = 0x80
FLAG_CUBIC = 0x80
};
const GlyphHeader &header;
@ -132,8 +132,8 @@ struct SimpleGlyph
if (unlikely (p + 1 > end)) return false;
unsigned int repeat_count = *p++;
unsigned stop = hb_min (i + repeat_count, count);
for (; i < stop;)
points_.arrayZ[i++].flag = flag;
for (; i < stop; i++)
points_.arrayZ[i].flag = flag;
}
}
return true;
@ -184,7 +184,7 @@ struct SimpleGlyph
if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours]))) return false;
unsigned int num_points = endPtsOfContours[num_contours - 1] + 1;
points_.alloc (num_points + 4); // Allocate for phantom points, to avoid a possible copy
points_.alloc (num_points + 4, true); // Allocate for phantom points, to avoid a possible copy
if (!points_.resize (num_points)) return false;
if (phantom_only) return true;
@ -206,6 +206,129 @@ struct SimpleGlyph
&& read_points (p, points_, end, &contour_point_t::y,
FLAG_Y_SHORT, FLAG_Y_SAME);
}
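/* Delta encoding per the TrueType glyf spec: a zero delta sets the SAME flag
 * and stores no bytes; a delta in [-255, 255] sets the SHORT flag and stores
 * one unsigned byte, with the SAME bit doubling as the positive-sign bit;
 * anything larger is stored as a signed 16-bit big-endian value with neither
 * flag set. */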
static void encode_coord (int value,
uint8_t &flag,
const simple_glyph_flag_t short_flag,
const simple_glyph_flag_t same_flag,
hb_vector_t<uint8_t> &coords /* OUT */)
{
if (value == 0)
{
flag |= same_flag;
}
else if (value >= -255 && value <= 255)
{
flag |= short_flag;
if (value > 0) flag |= same_flag;
else value = -value;
coords.arrayZ[coords.length++] = (uint8_t) value;
}
else
{
int16_t val = value;
coords.arrayZ[coords.length++] = val >> 8;
coords.arrayZ[coords.length++] = val & 0xff;
}
}
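/* Run-length encode the flag bytes: a flag equal to the previous one bumps a
 * repeat counter (written as FLAG_REPEAT plus a count byte, capped at 255);
 * a different flag is pushed as-is and resets the counter. */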
static void encode_flag (uint8_t &flag,
uint8_t &repeat,
uint8_t lastflag,
hb_vector_t<uint8_t> &flags /* OUT */)
{
if (flag == lastflag && repeat != 255)
{
repeat++;
if (repeat == 1)
{
/* We know there's room. */
flags.arrayZ[flags.length++] = flag;
}
else
{
unsigned len = flags.length;
flags.arrayZ[len-2] = flag | FLAG_REPEAT;
flags.arrayZ[len-1] = repeat;
}
}
else
{
repeat = 0;
flags.push (flag);
}
}
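/* Serializes a simple glyph body in the standard glyf layout:
 * endPtsOfContours[] and instructionLength (copied from the source glyph),
 * the optional instructions (dropped when no_hinting), then the re-encoded
 * flags, x deltas, and y deltas for all points except the 4 phantoms. */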
bool compile_bytes_with_deltas (const contour_point_vector_t &all_points,
bool no_hinting,
hb_bytes_t &dest_bytes /* OUT */)
{
if (header.numberOfContours == 0 || all_points.length <= 4)
{
dest_bytes = hb_bytes_t ();
return true;
}
unsigned num_points = all_points.length - 4;
hb_vector_t<uint8_t> flags, x_coords, y_coords;
if (unlikely (!flags.alloc (num_points, true))) return false;
if (unlikely (!x_coords.alloc (2*num_points, true))) return false;
if (unlikely (!y_coords.alloc (2*num_points, true))) return false;
uint8_t lastflag = 255, repeat = 0;
int prev_x = 0, prev_y = 0;
for (unsigned i = 0; i < num_points; i++)
{
uint8_t flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE + FLAG_OVERLAP_SIMPLE;
int cur_x = roundf (all_points.arrayZ[i].x);
int cur_y = roundf (all_points.arrayZ[i].y);
encode_coord (cur_x - prev_x, flag, FLAG_X_SHORT, FLAG_X_SAME, x_coords);
encode_coord (cur_y - prev_y, flag, FLAG_Y_SHORT, FLAG_Y_SAME, y_coords);
encode_flag (flag, repeat, lastflag, flags);
prev_x = cur_x;
prev_y = cur_y;
lastflag = flag;
}
unsigned len_before_instrs = 2 * header.numberOfContours + 2;
unsigned len_instrs = instructions_length ();
unsigned total_len = len_before_instrs + flags.length + x_coords.length + y_coords.length;
if (!no_hinting)
total_len += len_instrs;
char *p = (char *) hb_malloc (total_len);
if (unlikely (!p)) return false;
const char *src = bytes.arrayZ + GlyphHeader::static_size;
char *cur = p;
hb_memcpy (p, src, len_before_instrs);
cur += len_before_instrs;
src += len_before_instrs;
if (!no_hinting)
{
hb_memcpy (cur, src, len_instrs);
cur += len_instrs;
}
hb_memcpy (cur, flags.arrayZ, flags.length);
cur += flags.length;
hb_memcpy (cur, x_coords.arrayZ, x_coords.length);
cur += x_coords.length;
hb_memcpy (cur, y_coords.arrayZ, y_coords.length);
dest_bytes = hb_bytes_t (p, total_len);
return true;
}
};

View File

@ -6,12 +6,14 @@
namespace OT {
struct glyf_accelerator_t;
namespace glyf_impl {
struct SubsetGlyph
{
hb_codepoint_t new_gid;
hb_codepoint_t old_gid;
Glyph source_glyph;
hb_bytes_t dest_start; /* region of source_glyph to copy first */
@ -19,14 +21,14 @@ struct SubsetGlyph
bool serialize (hb_serialize_context_t *c,
bool use_short_loca,
const hb_subset_plan_t *plan) const
const hb_subset_plan_t *plan)
{
TRACE_SERIALIZE (this);
hb_bytes_t dest_glyph = dest_start.copy (c);
dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + dest_end.copy (c).length);
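/* Short loca stores offset/2, so glyph records must stay 2-byte aligned;
 * odd-length glyphs get a zero pad byte appended below. */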
unsigned int pad_length = use_short_loca ? padding () : 0;
DEBUG_MSG (SUBSET, nullptr, "serialize %d byte glyph, width %d pad %d", dest_glyph.length, dest_glyph.length + pad_length, pad_length);
DEBUG_MSG (SUBSET, nullptr, "serialize %u byte glyph, width %u pad %u", dest_glyph.length, dest_glyph.length + pad_length, pad_length);
HBUINT8 pad;
pad = 0;
@ -42,8 +44,8 @@ struct SubsetGlyph
for (auto &_ : Glyph (dest_glyph).get_composite_iterator ())
{
hb_codepoint_t new_gid;
if (plan->new_gid_for_old_gid (_.glyphIndex, &new_gid))
const_cast<CompositeGlyphRecord &> (_).glyphIndex = new_gid;
if (plan->new_gid_for_old_gid (_.get_gid(), &new_gid))
const_cast<CompositeGlyphRecord &> (_).set_gid (new_gid);
}
if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
@ -55,6 +57,17 @@ struct SubsetGlyph
return_trace (true);
}
bool compile_bytes_with_deltas (const hb_subset_plan_t *plan,
hb_font_t *font,
const glyf_accelerator_t &glyf)
{ return source_glyph.compile_bytes_with_deltas (plan, font, glyf, dest_start, dest_end); }
void free_compiled_bytes ()
{
dest_start.fini ();
dest_end.fini ();
}
void drop_hints_bytes ()
{ source_glyph.drop_hints_bytes (dest_start, dest_end); }

View File

@ -0,0 +1,354 @@
#ifndef OT_GLYF_VARCOMPOSITEGLYPH_HH
#define OT_GLYF_VARCOMPOSITEGLYPH_HH
#include "../../hb-open-type.hh"
#include "coord-setter.hh"
namespace OT {
namespace glyf_impl {
struct VarCompositeGlyphRecord
{
protected:
enum var_composite_glyph_flag_t
{
USE_MY_METRICS = 0x0001,
AXIS_INDICES_ARE_SHORT = 0x0002,
UNIFORM_SCALE = 0x0004,
HAVE_TRANSLATE_X = 0x0008,
HAVE_TRANSLATE_Y = 0x0010,
HAVE_ROTATION = 0x0020,
HAVE_SCALE_X = 0x0040,
HAVE_SCALE_Y = 0x0080,
HAVE_SKEW_X = 0x0100,
HAVE_SKEW_Y = 0x0200,
HAVE_TCENTER_X = 0x0400,
HAVE_TCENTER_Y = 0x0800,
GID_IS_24 = 0x1000,
AXES_HAVE_VARIATION = 0x2000,
RESET_UNSPECIFIED_AXES = 0x4000,
};
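/* The record is variable-length: the flags select which optional transform
 * fields (translate, rotation, scale, skew, transform center) follow the
 * gid and axis list, and get_size () accounts for each field present. */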
public:
unsigned int get_size () const
{
unsigned int size = min_size;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 4 : 3;
size += numAxes * axis_width;
// gid
size += 2;
if (flags & GID_IS_24) size += 1;
if (flags & HAVE_TRANSLATE_X) size += 2;
if (flags & HAVE_TRANSLATE_Y) size += 2;
if (flags & HAVE_ROTATION) size += 2;
if (flags & HAVE_SCALE_X) size += 2;
if (flags & HAVE_SCALE_Y) size += 2;
if (flags & HAVE_SKEW_X) size += 2;
if (flags & HAVE_SKEW_Y) size += 2;
if (flags & HAVE_TCENTER_X) size += 2;
if (flags & HAVE_TCENTER_Y) size += 2;
return size;
}
bool has_more () const { return true; }
bool is_use_my_metrics () const { return flags & USE_MY_METRICS; }
bool is_reset_unspecified_axes () const { return flags & RESET_UNSPECIFIED_AXES; }
hb_codepoint_t get_gid () const
{
if (flags & GID_IS_24)
return StructAfter<const HBGlyphID24> (numAxes);
else
return StructAfter<const HBGlyphID16> (numAxes);
}
unsigned get_numAxes () const
{
return numAxes;
}
unsigned get_num_points () const
{
unsigned num = 0;
if (flags & AXES_HAVE_VARIATION) num += numAxes;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y)) num++;
if (flags & HAVE_ROTATION) num++;
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y)) num++;
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y)) num++;
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y)) num++;
return num;
}
void transform_points (hb_array_t<contour_point_t> record_points,
contour_point_vector_t &points) const
{
float matrix[4];
contour_point_t trans;
get_transformation_from_points (record_points, matrix, trans);
points.transform (matrix);
points.translate (trans);
}
static inline void transform (float (&matrix)[4], contour_point_t &trans,
float (other)[6])
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L268
float xx1 = other[0];
float xy1 = other[1];
float yx1 = other[2];
float yy1 = other[3];
float dx1 = other[4];
float dy1 = other[5];
float xx2 = matrix[0];
float xy2 = matrix[1];
float yx2 = matrix[2];
float yy2 = matrix[3];
float dx2 = trans.x;
float dy2 = trans.y;
matrix[0] = xx1*xx2 + xy1*yx2;
matrix[1] = xx1*xy2 + xy1*yy2;
matrix[2] = yx1*xx2 + yy1*yx2;
matrix[3] = yx1*xy2 + yy1*yy2;
trans.x = xx2*dx1 + yx2*dy1 + dx2;
trans.y = xy2*dx1 + yy2*dy1 + dy2;
}
static void translate (float (&matrix)[4], contour_point_t &trans,
float translateX, float translateY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L213
float other[6] = {1.f, 0.f, 0.f, 1.f, translateX, translateY};
transform (matrix, trans, other);
}
static void scale (float (&matrix)[4], contour_point_t &trans,
float scaleX, float scaleY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L224
float other[6] = {scaleX, 0.f, 0.f, scaleY, 0.f, 0.f};
transform (matrix, trans, other);
}
static void rotate (float (&matrix)[4], contour_point_t &trans,
float rotation)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L240
rotation = rotation * float (M_PI);
float c = cosf (rotation);
float s = sinf (rotation);
float other[6] = {c, s, -s, c, 0.f, 0.f};
transform (matrix, trans, other);
}
static void skew (float (&matrix)[4], contour_point_t &trans,
float skewX, float skewY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L255
skewX = skewX * float (M_PI);
skewY = skewY * float (M_PI);
float other[6] = {1.f, tanf (skewY), tanf (skewX), 1.f, 0.f, 0.f};
transform (matrix, trans, other);
}
bool get_points (contour_point_vector_t &points) const
{
float translateX = 0.f;
float translateY = 0.f;
float rotation = 0.f;
float scaleX = 1.f * (1 << 10);
float scaleY = 1.f * (1 << 10);
float skewX = 0.f;
float skewY = 0.f;
float tCenterX = 0.f;
float tCenterY = 0.f;
if (unlikely (!points.resize (points.length + get_num_points ()))) return false;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = numAxes * axis_width;
const F2DOT14 *q = (const F2DOT14 *) (axes_size +
(flags & GID_IS_24 ? 3 : 2) +
&StructAfter<const HBUINT8> (numAxes));
hb_array_t<contour_point_t> rec_points = points.as_array ().sub_array (points.length - get_num_points ());
unsigned count = numAxes;
if (flags & AXES_HAVE_VARIATION)
{
for (unsigned i = 0; i < count; i++)
rec_points[i].x = q++->to_int ();
rec_points += count;
}
else
q += count;
const HBUINT16 *p = (const HBUINT16 *) q;
if (flags & HAVE_TRANSLATE_X) translateX = * (const FWORD *) p++;
if (flags & HAVE_TRANSLATE_Y) translateY = * (const FWORD *) p++;
if (flags & HAVE_ROTATION) rotation = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SCALE_X) scaleX = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SCALE_Y) scaleY = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SKEW_X) skewX = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SKEW_Y) skewY = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_TCENTER_X) tCenterX = * (const FWORD *) p++;
if (flags & HAVE_TCENTER_Y) tCenterY = * (const FWORD *) p++;
if ((flags & UNIFORM_SCALE) && !(flags & HAVE_SCALE_Y))
scaleY = scaleX;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
rec_points[0].x = translateX;
rec_points[0].y = translateY;
rec_points++;
}
if (flags & HAVE_ROTATION)
{
rec_points[0].x = rotation;
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
rec_points[0].x = scaleX;
rec_points[0].y = scaleY;
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
rec_points[0].x = skewX;
rec_points[0].y = skewY;
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
rec_points[0].x = tCenterX;
rec_points[0].y = tCenterY;
rec_points++;
}
assert (!rec_points);
return true;
}
void get_transformation_from_points (hb_array_t<contour_point_t> rec_points,
float (&matrix)[4], contour_point_t &trans) const
{
if (flags & AXES_HAVE_VARIATION)
rec_points += numAxes;
matrix[0] = matrix[3] = 1.f;
matrix[1] = matrix[2] = 0.f;
trans.init (0.f, 0.f);
float translateX = 0.f;
float translateY = 0.f;
float rotation = 0.f;
float scaleX = 1.f;
float scaleY = 1.f;
float skewX = 0.f;
float skewY = 0.f;
float tCenterX = 0.f;
float tCenterY = 0.f;
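/* rec_points carry raw fixed-point component values (after any variation
 * deltas were applied); normalize them here: scale is F6DOT10 (divide by
 * 1024), rotation and skew are F4DOT12 (divide by 4096), and translations
 * and transform centers are plain FWORDs. */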
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
translateX = rec_points[0].x;
translateY = rec_points[0].y;
rec_points++;
}
if (flags & HAVE_ROTATION)
{
rotation = rec_points[0].x / (1 << 12);
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
scaleX = rec_points[0].x / (1 << 10);
scaleY = rec_points[0].y / (1 << 10);
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
skewX = rec_points[0].x / (1 << 12);
skewY = rec_points[0].y / (1 << 12);
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
tCenterX = rec_points[0].x;
tCenterY = rec_points[0].y;
rec_points++;
}
assert (!rec_points);
translate (matrix, trans, translateX + tCenterX, translateY + tCenterY);
rotate (matrix, trans, rotation);
scale (matrix, trans, scaleX, scaleY);
skew (matrix, trans, -skewX, skewY);
translate (matrix, trans, -tCenterX, -tCenterY);
}
void set_variations (coord_setter_t &setter,
hb_array_t<contour_point_t> rec_points) const
{
bool have_variations = flags & AXES_HAVE_VARIATION;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
const HBUINT8 *p = (const HBUINT8 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24 ? 3 : 2));
const HBUINT16 *q = (const HBUINT16 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24 ? 3 : 2));
const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + numAxes) : (HBUINT8 *) (q + numAxes)));
unsigned count = numAxes;
for (unsigned i = 0; i < count; i++)
{
unsigned axis_index = axis_width == 1 ? (unsigned) *p++ : (unsigned) *q++;
signed v = have_variations ? rec_points[i].x : a++->to_int ();
v = hb_clamp (v, -(1<<14), (1<<14));
setter[axis_index] = v;
}
}
protected:
HBUINT16 flags;
HBUINT8 numAxes;
public:
DEFINE_SIZE_MIN (3);
};
using var_composite_iter_t = composite_iter_tmpl<VarCompositeGlyphRecord>;
struct VarCompositeGlyph
{
const GlyphHeader &header;
hb_bytes_t bytes;
VarCompositeGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) :
header (header_), bytes (bytes_) {}
var_composite_iter_t iter () const
{ return var_composite_iter_t (bytes, &StructAfter<VarCompositeGlyphRecord, GlyphHeader> (header)); }
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_VARCOMPOSITEGLYPH_HH */

View File

@ -0,0 +1,68 @@
#ifndef OT_GLYF_COMPOSITE_ITER_HH
#define OT_GLYF_COMPOSITE_ITER_HH
#include "../../hb.hh"
namespace OT {
namespace glyf_impl {
template <typename CompositeGlyphRecord>
struct composite_iter_tmpl : hb_iter_with_fallback_t<composite_iter_tmpl<CompositeGlyphRecord>,
const CompositeGlyphRecord &>
{
typedef const CompositeGlyphRecord *__item_t__;
composite_iter_tmpl (hb_bytes_t glyph_, __item_t__ current_) :
glyph (glyph_), current (nullptr), current_size (0)
{
set_current (current_);
}
composite_iter_tmpl () : glyph (hb_bytes_t ()), current (nullptr), current_size (0) {}
const CompositeGlyphRecord & __item__ () const { return *current; }
bool __more__ () const { return current; }
void __next__ ()
{
if (!current->has_more ()) { current = nullptr; return; }
set_current (&StructAtOffset<CompositeGlyphRecord> (current, current_size));
}
composite_iter_tmpl __end__ () const { return composite_iter_tmpl (); }
bool operator != (const composite_iter_tmpl& o) const
{ return current != o.current; }
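/* Validate the record header and then its full size against the glyph
 * bounds; on any out-of-range record the iterator degrades to the end
 * state (current == nullptr), so malformed fonts terminate iteration
 * safely instead of reading past the glyph data. */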
void set_current (__item_t__ current_)
{
if (!glyph.check_range (current_, CompositeGlyphRecord::min_size))
{
current = nullptr;
current_size = 0;
return;
}
unsigned size = current_->get_size ();
if (!glyph.check_range (current_, size))
{
current = nullptr;
current_size = 0;
return;
}
current = current_;
current_size = size;
}
private:
hb_bytes_t glyph;
__item_t__ current;
unsigned current_size;
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_COMPOSITE_ITER_HH */

View File

@ -0,0 +1,34 @@
#ifndef OT_GLYF_COORD_SETTER_HH
#define OT_GLYF_COORD_SETTER_HH
#include "../../hb.hh"
namespace OT {
namespace glyf_impl {
struct coord_setter_t
{
coord_setter_t (hb_array_t<int> coords) :
coords (coords) {}
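/* Grows the coordinate array on demand, so a component can set an axis
 * index beyond the font's current number of normalized coords; newly
 * added entries are zero (the default axis position). */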
int& operator [] (unsigned idx)
{
if (coords.length < idx + 1)
coords.resize (idx + 1);
return coords[idx];
}
hb_array_t<int> get_coords ()
{ return coords.as_array (); }
hb_vector_t<int> coords;
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_COORD_SETTER_HH */

View File

@ -16,7 +16,7 @@ template<typename IteratorIn, typename IteratorOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int)),
hb_requires (hb_is_sink_of (IteratorOut, unsigned))>
static void
_write_loca (IteratorIn it, bool short_offsets, IteratorOut dest)
_write_loca (IteratorIn&& it, bool short_offsets, IteratorOut&& dest)
{
unsigned right_shift = short_offsets ? 1 : 0;
unsigned int offset = 0;
@ -25,7 +25,7 @@ _write_loca (IteratorIn it, bool short_offsets, IteratorOut dest)
| hb_map ([=, &offset] (unsigned int padded_size)
{
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry offset %d", offset);
DEBUG_MSG (SUBSET, nullptr, "loca entry offset %u", offset);
return offset >> right_shift;
})
| hb_sink (dest)
@ -44,6 +44,20 @@ _add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca)
head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr);
head_prime->indexToLocFormat = use_short_loca ? 0 : 1;
if (plan->normalized_coords)
{
head_prime->xMin = plan->head_maxp_info.xMin;
head_prime->xMax = plan->head_maxp_info.xMax;
head_prime->yMin = plan->head_maxp_info.yMin;
head_prime->yMax = plan->head_maxp_info.yMax;
unsigned orig_flag = head_prime->flags;
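// head.flags bit 1 declares that each glyph's left sidebearing point is at x = 0 (lsb == xMin).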
if (plan->head_maxp_info.allXMinIsLsb)
orig_flag |= 1 << 1;
else
orig_flag &= ~(1 << 1);
head_prime->flags = orig_flag;
}
bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob);
hb_blob_destroy (head_prime_blob);
@ -61,7 +75,7 @@ _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_s
if (unlikely (!loca_prime_data)) return false;
DEBUG_MSG (SUBSET, nullptr, "loca entry_size %d num_offsets %d size %d",
DEBUG_MSG (SUBSET, nullptr, "loca entry_size %u num_offsets %u size %u",
entry_size, num_offsets, entry_size * num_offsets);
if (use_short_loca)

View File

@ -7,6 +7,7 @@
#include "../../hb-ot-hmtx-table.hh"
#include "../../hb-ot-var-gvar-table.hh"
#include "../../hb-draw.hh"
#include "../../hb-paint.hh"
#include "glyf-helpers.hh"
#include "Glyph.hh"
@ -24,7 +25,6 @@ namespace OT {
*/
#define HB_OT_TAG_glyf HB_TAG('g','l','y','f')
struct glyf
{
friend struct glyf_accelerator_t;
@ -46,8 +46,11 @@ struct glyf
const hb_subset_plan_t *plan)
{
TRACE_SERIALIZE (this);
unsigned init_len = c->length ();
for (const auto &_ : it) _.serialize (c, use_short_loca, plan);
for (auto &_ : it)
if (unlikely (!_.serialize (c, use_short_loca, plan)))
return false;
/* As a special case, when all glyphs in the font are empty, add a zero byte
 * to the table, so that OTS doesn't reject it, and to make the table work
@ -72,36 +75,67 @@ struct glyf
glyf *glyf_prime = c->serializer->start_embed <glyf> ();
if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false);
hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
_populate_subset_glyphs (c->plan, &glyphs);
auto padded_offsets =
+ hb_iter (glyphs)
| hb_map (&glyf_impl::SubsetGlyph::padded_size)
;
unsigned max_offset = + padded_offsets | hb_reduce (hb_add, 0);
bool use_short_loca = max_offset < 0x1FFFF;
glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan);
if (!use_short_loca) {
padded_offsets =
+ hb_iter (glyphs)
| hb_map (&glyf_impl::SubsetGlyph::length)
;
hb_font_t *font = nullptr;
if (c->plan->normalized_coords)
{
font = _create_font_for_instancing (c->plan);
if (unlikely (!font)) return false;
}
hb_vector_t<unsigned> padded_offsets;
unsigned num_glyphs = c->plan->num_output_glyphs ();
if (unlikely (!padded_offsets.resize (num_glyphs)))
return false;
hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
if (!_populate_subset_glyphs (c->plan, font, glyphs))
return false;
if (font)
hb_font_destroy (font);
unsigned max_offset = 0;
for (unsigned i = 0; i < num_glyphs; i++)
{
padded_offsets[i] = glyphs[i].padded_size ();
max_offset += padded_offsets[i];
}
bool use_short_loca = false;
if (likely (!c->plan->force_long_loca))
use_short_loca = max_offset < 0x1FFFF;
if (!use_short_loca) {
for (unsigned i = 0; i < num_glyphs; i++)
padded_offsets[i] = glyphs[i].length ();
}
bool result = glyf_prime->serialize (c->serializer, glyphs.writer (), use_short_loca, c->plan);
if (c->plan->normalized_coords && !c->plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs, glyphs.length - 1);
if (!result) return false;
if (unlikely (c->serializer->in_error ())) return_trace (false);
return_trace (c->serializer->check_success (glyf_impl::_add_loca_and_head (c->plan,
padded_offsets,
padded_offsets.iter (),
use_short_loca)));
}
void
bool
_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_vector_t<glyf_impl::SubsetGlyph> *glyphs /* OUT */) const;
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph> &glyphs /* OUT */) const;
hb_font_t *
_create_font_for_instancing (const hb_subset_plan_t *plan) const;
void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs, unsigned index) const
{
for (unsigned i = 0; i <= index && i < glyphs.length; i++)
glyphs[i].free_compiled_bytes ();
}
protected:
UnsizedArrayOf<HBUINT8>
@ -166,7 +200,7 @@ struct glyf_accelerator_t
contour_point_vector_t all_points;
bool phantom_only = !consumer.is_consuming_contour_points ();
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, phantom_only)))
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, nullptr, nullptr, nullptr, true, true, phantom_only)))
return false;
if (consumer.is_consuming_contour_points ())
@ -194,6 +228,7 @@ struct glyf_accelerator_t
hb_font_t *font;
hb_glyph_extents_t *extents;
contour_point_t *phantoms;
bool scaled;
struct contour_bounds_t
{
@ -209,7 +244,7 @@ struct glyf_accelerator_t
bool empty () const { return (min_x >= max_x) || (min_y >= max_y); }
void get_extents (hb_font_t *font, hb_glyph_extents_t *extents)
void get_extents (hb_font_t *font, hb_glyph_extents_t *extents, bool scaled)
{
if (unlikely (empty ()))
{
@ -219,26 +254,32 @@ struct glyf_accelerator_t
extents->y_bearing = 0;
return;
}
extents->x_bearing = font->em_scalef_x (min_x);
extents->width = font->em_scalef_x (max_x) - extents->x_bearing;
extents->y_bearing = font->em_scalef_y (max_y);
extents->height = font->em_scalef_y (min_y) - extents->y_bearing;
{
extents->x_bearing = roundf (min_x);
extents->width = roundf (max_x - extents->x_bearing);
extents->y_bearing = roundf (max_y);
extents->height = roundf (min_y - extents->y_bearing);
if (scaled)
font->scale_glyph_extents (extents);
}
}
protected:
float min_x, min_y, max_x, max_y;
} bounds;
points_aggregator_t (hb_font_t *font_, hb_glyph_extents_t *extents_, contour_point_t *phantoms_)
points_aggregator_t (hb_font_t *font_, hb_glyph_extents_t *extents_, contour_point_t *phantoms_, bool scaled_)
{
font = font_;
extents = extents_;
phantoms = phantoms_;
scaled = scaled_;
if (extents) bounds = contour_bounds_t ();
}
void consume_point (const contour_point_t &point) { bounds.add (point); }
void points_end () { bounds.get_extents (font, extents); }
void points_end () { bounds.get_extents (font, extents, scaled); }
bool is_consuming_contour_points () { return extents; }
contour_point_t *get_phantoms_sink () { return phantoms; }
@ -246,22 +287,22 @@ struct glyf_accelerator_t
public:
unsigned
get_advance_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const
get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const
{
if (unlikely (gid >= num_glyphs)) return 0;
bool success = false;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (likely (font->num_coords == gvar->get_axis_count ()))
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms));
if (font->num_coords)
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false));
if (unlikely (!success))
return
#ifndef HB_NO_VERTICAL
is_vertical ? vmtx->get_advance (gid) :
is_vertical ? vmtx->get_advance_without_var_unscaled (gid) :
#endif
hmtx->get_advance (gid);
hmtx->get_advance_without_var_unscaled (gid);
float result = is_vertical
? phantoms[glyf_impl::PHANTOM_TOP].y - phantoms[glyf_impl::PHANTOM_BOTTOM].y
@ -269,23 +310,20 @@ struct glyf_accelerator_t
return hb_clamp (roundf (result), 0.f, (float) UINT_MAX / 2);
}
int get_side_bearing_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const
bool get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t gid, bool is_vertical, int *lsb) const
{
if (unlikely (gid >= num_glyphs)) return 0;
if (unlikely (gid >= num_glyphs)) return false;
hb_glyph_extents_t extents;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms))))
return
#ifndef HB_NO_VERTICAL
is_vertical ? vmtx->get_side_bearing (gid) :
#endif
hmtx->get_side_bearing (gid);
if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms, false))))
return false;
return is_vertical
? ceilf (phantoms[glyf_impl::PHANTOM_TOP].y) - extents.y_bearing
: floorf (phantoms[glyf_impl::PHANTOM_LEFT].x);
*lsb = is_vertical
? roundf (phantoms[glyf_impl::PHANTOM_TOP].y) - extents.y_bearing
: roundf (phantoms[glyf_impl::PHANTOM_LEFT].x);
return true;
}
#endif
@ -296,9 +334,18 @@ struct glyf_accelerator_t
#ifndef HB_NO_VAR
if (font->num_coords)
return get_points (font, gid, points_aggregator_t (font, extents, nullptr));
return get_points (font, gid, points_aggregator_t (font, extents, nullptr, true));
#endif
return glyph_for_gid (gid).get_extents (font, *this, extents);
return glyph_for_gid (gid).get_extents_without_var_scaled (font, *this, extents);
}
bool paint_glyph (hb_font_t *font, hb_codepoint_t gid, hb_paint_funcs_t *funcs, void *data, hb_color_t foreground) const
{
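/* glyf carries no color data; paint by clipping to the glyph outline and
 * filling with the foreground color. */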
funcs->push_clip_glyph (data, gid, font);
funcs->color (data, true, foreground);
funcs->pop_clip (data);
return true;
}
const glyf_impl::Glyph
@ -349,37 +396,76 @@ struct glyf_accelerator_t
};
inline void
inline bool
glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_vector_t<glyf_impl::SubsetGlyph> *glyphs /* OUT */) const
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const
{
OT::glyf_accelerator_t glyf (plan->source);
unsigned num_glyphs = plan->num_output_glyphs ();
if (!glyphs.resize (num_glyphs)) return false;
+ hb_range (plan->num_output_glyphs ())
| hb_map ([&] (hb_codepoint_t new_gid)
{
glyf_impl::SubsetGlyph subset_glyph = {0};
subset_glyph.new_gid = new_gid;
unsigned idx = 0;
for (auto p : plan->glyph_map->iter ())
{
unsigned new_gid = p.second;
glyf_impl::SubsetGlyph& subset_glyph = glyphs.arrayZ[new_gid];
subset_glyph.old_gid = p.first;
/* should never fail: all old gids should be mapped */
if (!plan->old_gid_for_new_gid (new_gid, &subset_glyph.old_gid))
return subset_glyph;
if (unlikely (new_gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) &&
!plan->normalized_coords)
subset_glyph.source_glyph = glyf_impl::Glyph ();
else
{
/* If the plan has an accelerator, the preprocessing step already trimmed glyphs.
 * Don't trim them again! */
subset_glyph.source_glyph = glyf.glyph_for_gid (subset_glyph.old_gid, !plan->accelerator);
}
if (new_gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
subset_glyph.source_glyph = glyf_impl::Glyph ();
else
subset_glyph.source_glyph = glyf.glyph_for_gid (subset_glyph.old_gid, true);
if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
subset_glyph.drop_hints_bytes ();
else
subset_glyph.dest_start = subset_glyph.source_glyph.get_bytes ();
return subset_glyph;
})
| hb_sink (glyphs)
;
if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
subset_glyph.drop_hints_bytes ();
else
subset_glyph.dest_start = subset_glyph.source_glyph.get_bytes ();
if (font)
{
if (unlikely (!subset_glyph.compile_bytes_with_deltas (plan, font, glyf)))
{
// When pinned at default, only bounds are updated, so there is nothing to free.
if (!plan->pinned_at_default && idx > 0)
_free_compiled_subset_glyphs (glyphs, idx - 1);
return false;
}
idx++;
}
}
return true;
}
inline hb_font_t *
glyf::_create_font_for_instancing (const hb_subset_plan_t *plan) const
{
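/* Create a font object pinned at the axis positions the user requested, so
 * that glyph deltas can be applied and baked into the subset outlines. */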
hb_font_t *font = hb_font_create (plan->source);
if (unlikely (font == hb_font_get_empty ())) return nullptr;
hb_vector_t<hb_variation_t> vars;
if (unlikely (!vars.alloc (plan->user_axes_location.get_population (), true)))
return nullptr;
for (auto _ : plan->user_axes_location)
{
hb_variation_t var;
var.tag = _.first;
var.value = _.second;
vars.push (var);
}
#ifndef HB_NO_VAR
hb_font_set_variations (font, vars.arrayZ, plan->user_axes_location.get_population ());
#endif
return font;
}
} /* namespace OT */

View File

@ -26,22 +26,29 @@ struct path_builder_t
optional_point_t lerp (optional_point_t p, float t)
{ return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); }
} first_oncurve, first_offcurve, last_offcurve;
} first_oncurve, first_offcurve, last_offcurve, last_offcurve2;
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_)
{
font = font_;
draw_session = &draw_session_;
first_oncurve = first_offcurve = last_offcurve = optional_point_t ();
first_oncurve = first_offcurve = last_offcurve = last_offcurve2 = optional_point_t ();
}
/* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287
See also:
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
* https://stackoverflow.com/a/20772557 */
* https://stackoverflow.com/a/20772557
*
* Cubic support added (incomplete). */
void consume_point (const contour_point_t &point)
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
#ifdef HB_NO_CUBIC_GLYF
bool is_cubic = false;
#else
bool is_cubic = !is_on_curve && (point.flag & glyf_impl::SimpleGlyph::FLAG_CUBIC);
#endif
optional_point_t p (font->em_fscalef_x (point.x), font->em_fscalef_y (point.y));
if (!first_oncurve)
{
@ -69,16 +76,41 @@ struct path_builder_t
{
if (is_on_curve)
{
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
p.x, p.y);
if (last_offcurve2)
{
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
p.x, p.y);
last_offcurve2 = optional_point_t ();
}
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
p.x, p.y);
last_offcurve = optional_point_t ();
}
else
{
optional_point_t mid = last_offcurve.lerp (p, .5f);
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve = p;
if (is_cubic && !last_offcurve2)
{
last_offcurve2 = last_offcurve;
last_offcurve = p;
}
else
{
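// Two consecutive quadratic off-curve points imply an on-curve point at their midpoint (TrueType convention).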
optional_point_t mid = last_offcurve.lerp (p, .5f);
if (is_cubic)
{
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve2 = optional_point_t ();
}
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve = p;
}
}
}
else
@ -105,8 +137,15 @@ struct path_builder_t
draw_session->quadratic_to (first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
else if (last_offcurve && first_oncurve)
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
first_oncurve.x, first_oncurve.y);
{
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
first_oncurve.x, first_oncurve.y);
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
first_oncurve.x, first_oncurve.y);
}
else if (first_oncurve)
draw_session->line_to (first_oncurve.x, first_oncurve.y);
else if (first_offcurve)
@ -117,7 +156,7 @@ struct path_builder_t
}
/* Getting ready for the next contour */
first_oncurve = first_offcurve = last_offcurve = optional_point_t ();
first_oncurve = first_offcurve = last_offcurve = last_offcurve2 = optional_point_t ();
draw_session->close_path ();
}
}

View File

@ -0,0 +1,589 @@
/*
* Copyright © 2011,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Behdad Esfahbod
*/
#ifndef OT_NAME_NAME_HH
#define OT_NAME_NAME_HH
#include "../../hb-open-type.hh"
#include "../../hb-ot-name-language.hh"
#include "../../hb-aat-layout.hh"
#include "../../hb-utf.hh"
namespace OT {
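/* Convert a name-table string between UTF encodings. If a destination buffer
 * is given, fill it (reserving room for a NUL terminator); in all cases
 * return the full converted length, so callers can make a second,
 * exactly-sized pass. */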
template <typename in_utf_t, typename out_utf_t>
inline unsigned int
hb_ot_name_convert_utf (hb_bytes_t bytes,
unsigned int *text_size /* IN/OUT */,
typename out_utf_t::codepoint_t *text /* OUT */)
{
unsigned int src_len = bytes.length / sizeof (typename in_utf_t::codepoint_t);
const typename in_utf_t::codepoint_t *src = (const typename in_utf_t::codepoint_t *) bytes.arrayZ;
const typename in_utf_t::codepoint_t *src_end = src + src_len;
typename out_utf_t::codepoint_t *dst = text;
hb_codepoint_t unicode;
const hb_codepoint_t replacement = HB_BUFFER_REPLACEMENT_CODEPOINT_DEFAULT;
if (text_size && *text_size)
{
(*text_size)--; /* Save room for NUL-termination. */
const typename out_utf_t::codepoint_t *dst_end = text + *text_size;
while (src < src_end && dst < dst_end)
{
const typename in_utf_t::codepoint_t *src_next = in_utf_t::next (src, src_end, &unicode, replacement);
typename out_utf_t::codepoint_t *dst_next = out_utf_t::encode (dst, dst_end, unicode);
if (dst_next == dst)
break; /* Out-of-room. */
dst = dst_next;
src = src_next;
}
*text_size = dst - text;
*dst = 0; /* NUL-terminate. */
}
/* Accumulate length of rest. */
unsigned int dst_len = dst - text;
while (src < src_end)
{
src = in_utf_t::next (src, src_end, &unicode, replacement);
dst_len += out_utf_t::encode_len (unicode);
}
return dst_len;
}
#define entry_score var.u16[0]
#define entry_index var.u16[1]
/*
* name -- Naming
* https://docs.microsoft.com/en-us/typography/opentype/spec/name
*/
#define HB_OT_TAG_name HB_TAG('n','a','m','e')
#define UNSUPPORTED 42
struct NameRecord
{
hb_language_t language (hb_face_t *face) const
{
#ifndef HB_NO_OT_NAME_LANGUAGE
unsigned int p = platformID;
unsigned int l = languageID;
if (p == 3)
return _hb_ot_name_language_for_ms_code (l);
if (p == 1)
return _hb_ot_name_language_for_mac_code (l);
#ifndef HB_NO_OT_NAME_LANGUAGE_AAT
if (p == 0)
return face->table.ltag->get_language (l);
#endif
#endif
return HB_LANGUAGE_INVALID;
}
uint16_t score () const
{
/* Same order as in cmap::find_best_subtable(). */
unsigned int p = platformID;
unsigned int e = encodingID;
/* 32-bit. */
if (p == 3 && e == 10) return 0;
if (p == 0 && e == 6) return 1;
if (p == 0 && e == 4) return 2;
/* 16-bit. */
if (p == 3 && e == 1) return 3;
if (p == 0 && e == 3) return 4;
if (p == 0 && e == 2) return 5;
if (p == 0 && e == 1) return 6;
if (p == 0 && e == 0) return 7;
/* Symbol. */
if (p == 3 && e == 0) return 8;
/* We treat all Mac Latin names as ASCII only. */
if (p == 1 && e == 0) return 10; /* 10 is magic number :| */
return UNSUPPORTED;
}
NameRecord* copy (hb_serialize_context_t *c, const void *base
#ifdef HB_EXPERIMENTAL_API
, const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides
#endif
) const
{
TRACE_SERIALIZE (this);
HB_UNUSED auto snap = c->snapshot ();
auto *out = c->embed (this);
if (unlikely (!out)) return_trace (nullptr);
#ifdef HB_EXPERIMENTAL_API
hb_ot_name_record_ids_t record_ids (platformID, encodingID, languageID, nameID);
hb_bytes_t* name_bytes;
if (name_table_overrides->has (record_ids, &name_bytes)) {
hb_bytes_t encoded_bytes = *name_bytes;
char *name_str_utf16_be = nullptr;
if (platformID != 1)
{
unsigned text_size = hb_ot_name_convert_utf<hb_utf8_t, hb_utf16_be_t> (*name_bytes, nullptr, nullptr);
text_size++; // reserve room for the NUL terminator that hb_ot_name_convert_utf() accounts for
unsigned byte_len = text_size * hb_utf16_be_t::codepoint_t::static_size;
name_str_utf16_be = (char *) hb_calloc (byte_len, 1);
if (!name_str_utf16_be)
{
c->revert (snap);
return_trace (nullptr);
}
hb_ot_name_convert_utf<hb_utf8_t, hb_utf16_be_t> (*name_bytes, &text_size,
(hb_utf16_be_t::codepoint_t *) name_str_utf16_be);
unsigned encoded_byte_len = text_size * hb_utf16_be_t::codepoint_t::static_size;
if (!encoded_byte_len || !c->check_assign (out->length, encoded_byte_len, HB_SERIALIZE_ERROR_INT_OVERFLOW)) {
c->revert (snap);
hb_free (name_str_utf16_be);
return_trace (nullptr);
}
encoded_bytes = hb_bytes_t (name_str_utf16_be, encoded_byte_len);
}
else
{
// Mac platform: copy the UTF-8 string (all ASCII characters) as is.
if (!c->check_assign (out->length, encoded_bytes.length, HB_SERIALIZE_ERROR_INT_OVERFLOW)) {
c->revert (snap);
return_trace (nullptr);
}
}
out->offset = 0;
c->push ();
encoded_bytes.copy (c);
c->add_link (out->offset, c->pop_pack (), hb_serialize_context_t::Tail, 0);
hb_free (name_str_utf16_be);
}
else
#endif
{
out->offset.serialize_copy (c, offset, base, 0, hb_serialize_context_t::Tail, length);
}
return_trace (out);
}
bool isUnicode () const
{
unsigned int p = platformID;
unsigned int e = encodingID;
return (p == 0 ||
(p == 3 && (e == 0 || e == 1 || e == 10)));
}
static int cmp (const void *pa, const void *pb)
{
const NameRecord *a = (const NameRecord *)pa;
const NameRecord *b = (const NameRecord *)pb;
if (a->platformID != b->platformID)
return a->platformID - b->platformID;
if (a->encodingID != b->encodingID)
return a->encodingID - b->encodingID;
if (a->languageID != b->languageID)
return a->languageID - b->languageID;
if (a->nameID != b->nameID)
return a->nameID - b->nameID;
if (a->length != b->length)
return a->length - b->length;
return 0;
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && offset.sanitize (c, base, length));
}
HBUINT16 platformID; /* Platform ID. */
HBUINT16 encodingID; /* Platform-specific encoding ID. */
HBUINT16 languageID; /* Language ID. */
HBUINT16 nameID; /* Name ID. */
HBUINT16 length; /* String length (in bytes). */
NNOffset16To<UnsizedArrayOf<HBUINT8>>
offset; /* String offset from start of storage area (in bytes). */
public:
DEFINE_SIZE_STATIC (12);
};
static int
_hb_ot_name_entry_cmp_key (const void *pa, const void *pb, bool exact)
{
const hb_ot_name_entry_t *a = (const hb_ot_name_entry_t *) pa;
const hb_ot_name_entry_t *b = (const hb_ot_name_entry_t *) pb;
/* Compare by name_id, then language. */
if (a->name_id != b->name_id)
return a->name_id - b->name_id;
if (a->language == b->language) return 0;
if (!a->language) return -1;
if (!b->language) return +1;
const char *astr = hb_language_to_string (a->language);
const char *bstr = hb_language_to_string (b->language);
signed c = strcmp (astr, bstr);
// 'a' is the user request, and 'b' is the string in the font.
// If e.g. the user asks for "en-us" and the font has "en", approve.
if (!exact && c &&
hb_language_matches (b->language, a->language))
return 0;
return c;
}
static int
_hb_ot_name_entry_cmp (const void *pa, const void *pb)
{
/* Compare by name_id, then language, then score, then index. */
int v = _hb_ot_name_entry_cmp_key (pa, pb, true);
if (v)
return v;
const hb_ot_name_entry_t *a = (const hb_ot_name_entry_t *) pa;
const hb_ot_name_entry_t *b = (const hb_ot_name_entry_t *) pb;
if (a->entry_score != b->entry_score)
return a->entry_score - b->entry_score;
if (a->entry_index != b->entry_index)
return a->entry_index - b->entry_index;
return 0;
}
struct name
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_name;
unsigned int get_size () const
{ return min_size + count * nameRecordZ.item_size; }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, const NameRecord &))>
bool serialize (hb_serialize_context_t *c,
Iterator it,
const void *src_string_pool
#ifdef HB_EXPERIMENTAL_API
, const hb_vector_t<hb_ot_name_record_ids_t>& insert_name_records
, const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides
#endif
)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min ((*this)))) return_trace (false);
unsigned total_count = it.len ()
#ifdef HB_EXPERIMENTAL_API
+ insert_name_records.length
#endif
;
this->format = 0;
if (!c->check_assign (this->count, total_count, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return false;
NameRecord *name_records = (NameRecord *) hb_calloc (total_count, NameRecord::static_size);
if (unlikely (!name_records)) return_trace (false);
hb_array_t<NameRecord> records (name_records, total_count);
for (const NameRecord& record : it)
{
hb_memcpy (name_records, &record, NameRecord::static_size);
name_records++;
}
#ifdef HB_EXPERIMENTAL_API
for (unsigned i = 0; i < insert_name_records.length; i++)
{
const hb_ot_name_record_ids_t& ids = insert_name_records[i];
NameRecord record;
record.platformID = ids.platform_id;
record.encodingID = ids.encoding_id;
record.languageID = ids.language_id;
record.nameID = ids.name_id;
record.length = 0; // handled in NameRecord copy()
record.offset = 0;
memcpy (name_records, &record, NameRecord::static_size);
name_records++;
}
#endif
records.qsort ();
c->copy_all (records,
src_string_pool
#ifdef HB_EXPERIMENTAL_API
, name_table_overrides
#endif
);
hb_free (records.arrayZ);
if (unlikely (c->ran_out_of_room ())) return_trace (false);
this->stringOffset = c->length ();
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
name *name_prime = c->serializer->start_embed<name> ();
if (unlikely (!name_prime)) return_trace (false);
#ifdef HB_EXPERIMENTAL_API
const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides =
&c->plan->name_table_overrides;
#endif
auto it =
+ nameRecordZ.as_array (count)
| hb_filter (c->plan->name_ids, &NameRecord::nameID)
| hb_filter (c->plan->name_languages, &NameRecord::languageID)
| hb_filter ([&] (const NameRecord& namerecord) {
return
(c->plan->flags & HB_SUBSET_FLAGS_NAME_LEGACY)
|| namerecord.isUnicode ();
})
#ifdef HB_EXPERIMENTAL_API
| hb_filter ([&] (const NameRecord& namerecord) {
if (name_table_overrides->is_empty ())
return true;
hb_ot_name_record_ids_t rec_ids (namerecord.platformID,
namerecord.encodingID,
namerecord.languageID,
namerecord.nameID);
hb_bytes_t *p;
if (name_table_overrides->has (rec_ids, &p) &&
(*p).length == 0)
return false;
return true;
})
#endif
;
#ifdef HB_EXPERIMENTAL_API
hb_hashmap_t<hb_ot_name_record_ids_t, unsigned> retained_name_record_ids;
for (const NameRecord& rec : it)
{
hb_ot_name_record_ids_t rec_ids (rec.platformID,
rec.encodingID,
rec.languageID,
rec.nameID);
retained_name_record_ids.set (rec_ids, 1);
}
hb_vector_t<hb_ot_name_record_ids_t> insert_name_records;
if (!name_table_overrides->is_empty ())
{
if (unlikely (!insert_name_records.alloc (name_table_overrides->get_population (), true)))
return_trace (false);
for (const auto& record_ids : name_table_overrides->keys ())
{
if (name_table_overrides->get (record_ids).length == 0)
continue;
if (retained_name_record_ids.has (record_ids))
continue;
insert_name_records.push (record_ids);
}
}
#endif
return (name_prime->serialize (c->serializer, it,
std::addressof (this + stringOffset)
#ifdef HB_EXPERIMENTAL_API
, insert_name_records
, name_table_overrides
#endif
));
}
bool sanitize_records (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
const void *string_pool = (this+stringOffset).arrayZ;
return_trace (nameRecordZ.sanitize (c, count, string_pool));
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
likely (format == 0 || format == 1) &&
c->check_array (nameRecordZ.arrayZ, count) &&
c->check_range (this, stringOffset) &&
sanitize_records (c));
}
struct accelerator_t
{
accelerator_t (hb_face_t *face)
{
this->table = hb_sanitize_context_t ().reference_table<name> (face);
assert (this->table.get_length () >= this->table->stringOffset);
this->pool = (const char *) (const void *) (this->table+this->table->stringOffset);
this->pool_len = this->table.get_length () - this->table->stringOffset;
const hb_array_t<const NameRecord> all_names (this->table->nameRecordZ.arrayZ,
this->table->count);
this->names.alloc (all_names.length, true);
for (unsigned int i = 0; i < all_names.length; i++)
{
hb_ot_name_entry_t *entry = this->names.push ();
entry->name_id = all_names[i].nameID;
entry->language = all_names[i].language (face);
entry->entry_score = all_names[i].score ();
entry->entry_index = i;
}
this->names.qsort (_hb_ot_name_entry_cmp);
/* Walk and pick best only for each name_id,language pair,
* while dropping unsupported encodings. */
unsigned int j = 0;
for (unsigned int i = 0; i < this->names.length; i++)
{
if (this->names[i].entry_score == UNSUPPORTED ||
this->names[i].language == HB_LANGUAGE_INVALID)
continue;
if (i &&
this->names[i - 1].name_id == this->names[i].name_id &&
this->names[i - 1].language == this->names[i].language)
continue;
this->names[j++] = this->names[i];
}
this->names.resize (j);
}
~accelerator_t ()
{
this->table.destroy ();
}
int get_index (hb_ot_name_id_t name_id,
hb_language_t language,
unsigned int *width=nullptr) const
{
const hb_ot_name_entry_t key = {name_id, {0}, language};
const hb_ot_name_entry_t *entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names,
this->names.length,
sizeof (hb_ot_name_entry_t),
_hb_ot_name_entry_cmp_key,
true);
if (!entry)
{
entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names,
this->names.length,
sizeof (hb_ot_name_entry_t),
_hb_ot_name_entry_cmp_key,
false);
}
if (!entry)
return -1;
if (width)
*width = entry->entry_score < 10 ? 2 : 1;
return entry->entry_index;
}
hb_bytes_t get_name (unsigned int idx) const
{
const hb_array_t<const NameRecord> all_names (table->nameRecordZ.arrayZ, table->count);
const NameRecord &record = all_names[idx];
const hb_bytes_t string_pool (pool, pool_len);
return string_pool.sub_array (record.offset, record.length);
}
private:
const char *pool;
unsigned int pool_len;
public:
hb_blob_ptr_t<name> table;
hb_vector_t<hb_ot_name_entry_t> names;
};
public:
/* We only implement format 0 for now. */
HBUINT16 format; /* Format selector (=0/1). */
HBUINT16 count; /* Number of name records. */
NNOffset16To<UnsizedArrayOf<HBUINT8>>
stringOffset; /* Offset to start of string storage (from start of table). */
UnsizedArrayOf<NameRecord>
nameRecordZ; /* The name records where count is the number of records. */
public:
DEFINE_SIZE_ARRAY (6, nameRecordZ);
};
#undef entry_index
#undef entry_score
struct name_accelerator_t : name::accelerator_t {
name_accelerator_t (hb_face_t *face) : name::accelerator_t (face) {}
};
} /* namespace OT */
#endif /* OT_NAME_NAME_HH */

View File

@ -1,53 +1,118 @@
Tips and tasks when updating harfbuzz sources to a newer version.
-----------------------------------------------------------------
STEP 1: UPDATING FILES
----------------------
Download and unzip the latest version from https://github.com/harfbuzz/harfbuzz/releases
We only use files from the src directory and even then only the ones we need.
So just C++ include and source files and only the ones needed for the library,
and even then just the ones we use. Do NOT just copy everything.
and even then just the ones we use.
IMPORTANT! DO NOT just copy everything.
So one way to update is to
- copy over from the updated harfbuzz the exact same files we already have
- it isn't a flat directory so watch out for that
- any that are no longer available (copy fails) we remove but these may come
back later if they were actually renamed
- look for files in the destination that were NOT updated - perhaps they
are gone in the upstream - or renamed. Remove them if they are really
obsolete, or add their replacements/renames.
- iterate: build and see what new file is missing that causes a build failure
- when this is done we have something buildable
- make sure it builds on all supported platforms.
- Harfbuzz is not modular so it is not easy,
- Harfbuzz is not modular, so the update is not a straightforward process.
- The main thing is we do NOT want any
* "test" programs (test in the name, have a main are clues)
* support for (eg) GLib, DirectWrite, Graphite, GDI, ICU, Uniscribe
* aggregators like harfbuzz.cc - since it includes things from the above
as well as hb-ft.cc which we specifically exclude in the Makefile
* but we do use core text support on macOS.
* I really wish that "src" were just library source, but I expect the authors
have their reasons.
So one way to update is to
- Update make parameters as needed
- copy over from the updated harfbuzz the exact same files we already have
- it isn't a flat directory so watch out for that.
- For files that are no longer available (for which copy fails), we remove such files,
but these may come back later if they were actually renamed.
- look for files in the destination that were NOT updated - perhaps they
are removed or renamed in the upstream. Remove them if they are really
obsolete, or add their replacements/renames.
In IntelliJ IDE:
Newly added files are shown in RED
Modified in BLUE
NOT Updated in WHITE
This feature might be helpful to keep track of new, modified and unchanged files.
STEP 2: BUILD CHANGES INCREMENTALLY
-----------------------------------
- iterate: build and see which newly required file is missing and causes a build failure.
Sometimes just running a build does not surface any failures, due to stale files.
A clean followed by a build is helpful in this situation.
- You might run into compiler warnings that are treated as errors or the requirement
to set certain compiler flags if the build fails on a specific platform.
Check "COMPILER WARNINGS AND SETTING FLAGS" section for more details.
- when this is done we have something buildable; make sure it builds
on all supported platforms.
STEP 3: COMPILER WARNINGS AND SETTING FLAGS
-------------------------------------------
- Update make parameters in Awt2DLibraries.gmk
Since we don't use configure we need to manually specify the options
we need in the Harfbuzz section of Awt2DLibraries.gmk.
As well as adding new options, we may need to clean up obsolete options.
Note there may be platform variations in the flags.
- As with other 3rd party libs we do not fix the code to eliminate compiler
warnings unless they are critical and clearly avoiding a bug. Even then
we'd report it upstream and apply the patch once it is made available.
The usual practice is to just disable the warnings.
STEP 4: UPDATING .md FILE
-------------------------
- we do not apply any header file changes so this is not an issue.
- verify the license text is unchanged (extra steps are needed if it is) and update
src/java.desktop/share/legal/harfbuzz.md with the new version.
STEP 5: REPLACE TABS & REMOVE TRAILING SPACES
---------------------------------------------
- clean up trailing white space and tabs to follow jcheck rules.
Use "expand" and "sed" to remove tabs and trailing white space from the
imported sources.
To clean up the extra spaces and tabs run the following script at
each folder level within libharfbuzz.
for f in *.c *.h *.cc *.hh;
do
# replace tabs with spaces
expand ${f} > ${f}.tmp;
mv ${f}.tmp $f;
# fix line endings to LF
sed -e 's/\r$//g' ${f} > ${f}.tmp;
mv ${f}.tmp $f;
# remove trailing spaces
sed -e 's/[ ]* $//g' ${f} > ${f}.tmp;
mv ${f}.tmp $f;
done
STEP 6: TESTING
---------------
- test using all the automated jtreg tests on all platforms.
- do MANUAL verification of Arabic, Hebrew, Thai, Indic against previous releases.
Look for manual layout-related jtreg tests (test/jdk/java/awt/font/TextLayout)
and run them on Windows, Linux and Mac.
Use Font2DTest set to TextLayout and check the above languages. You are probably
not going to see layout problems one code point at a time, but it needs to
be checked.
Different Unicode combinations can be checked using Font2DTest.
Run Font2DTest, select the 'UserText' option for 'Text to use'.
Paste Unicode text of the different languages (Arabic, Hebrew, Thai, Indic)
and compare the glyphs with previous versions.
It should look the same in both cases.
- FINALLY, do update THIS UPDATING.txt file too if it is outdated.


@ -0,0 +1,216 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-common.hh"
#ifndef GRAPH_CLASSDEF_GRAPH_HH
#define GRAPH_CLASSDEF_GRAPH_HH
namespace graph {
struct ClassDefFormat1 : public OT::ClassDefFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::ClassDefFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
return vertex_len >= min_size + classValue.get_size () - classValue.len.get_size ();
}
};
struct ClassDefFormat2 : public OT::ClassDefFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::ClassDefFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
}
};
struct ClassDef : public OT::ClassDef
{
template<typename It>
static bool add_class_def (gsubgpos_graph_context_t& c,
unsigned parent_id,
unsigned link_position,
It glyph_and_class,
unsigned max_size)
{
unsigned class_def_prime_id = c.graph.new_node (nullptr, nullptr);
auto& class_def_prime_vertex = c.graph.vertices_[class_def_prime_id];
if (!make_class_def (c, glyph_and_class, class_def_prime_id, max_size))
return false;
auto* class_def_link = c.graph.vertices_[parent_id].obj.real_links.push ();
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_prime_id;
class_def_link->position = link_position;
class_def_prime_vertex.parents.push (parent_id);
return true;
}
template<typename It>
static bool make_class_def (gsubgpos_graph_context_t& c,
It glyph_and_class,
unsigned dest_obj,
unsigned max_size)
{
char* buffer = (char*) hb_calloc (1, max_size);
hb_serialize_context_t serializer (buffer, max_size);
OT::ClassDef_serialize (&serializer, glyph_and_class);
serializer.end_serialize ();
if (serializer.in_error ())
{
hb_free (buffer);
return false;
}
hb_bytes_t class_def_copy = serializer.copy_bytes ();
c.add_buffer ((char *) class_def_copy.arrayZ); // Give ownership to the context; it will clean up the buffer.
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) class_def_copy.arrayZ;
obj.tail = obj.head + class_def_copy.length;
hb_free (buffer);
return true;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::ClassDef::min_size) return false;
switch (u.format)
{
case 1: return ((ClassDefFormat1*)this)->sanitize (vertex);
case 2: return ((ClassDefFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
// Not currently supported
case 3:
case 4:
#endif
default: return false;
}
}
};
struct class_def_size_estimator_t
{
template<typename It>
class_def_size_estimator_t (It glyph_and_class)
: gids_consecutive (true), num_ranges_per_class (), glyphs_per_class ()
{
unsigned last_gid = (unsigned) -1;
for (auto p : + glyph_and_class)
{
unsigned gid = p.first;
unsigned klass = p.second;
if (last_gid != (unsigned) -1 && gid != last_gid + 1)
gids_consecutive = false;
last_gid = gid;
hb_set_t* glyphs;
if (glyphs_per_class.has (klass, &glyphs) && glyphs) {
glyphs->add (gid);
continue;
}
hb_set_t new_glyphs;
new_glyphs.add (gid);
glyphs_per_class.set (klass, std::move (new_glyphs));
}
if (in_error ()) return;
for (unsigned klass : glyphs_per_class.keys ())
{
if (!klass) continue; // class 0 doesn't get encoded.
const hb_set_t& glyphs = glyphs_per_class.get (klass);
hb_codepoint_t start = HB_SET_VALUE_INVALID;
hb_codepoint_t end = HB_SET_VALUE_INVALID;
unsigned count = 0;
while (glyphs.next_range (&start, &end))
count++;
num_ranges_per_class.set (klass, count);
}
}
// Incremental increase in the Coverage and ClassDef table size
// (worst case) if all glyphs associated with 'klass' were added.
unsigned incremental_coverage_size (unsigned klass) const
{
// Coverage takes 2 bytes per glyph worst case.
return 2 * glyphs_per_class.get (klass).get_population ();
}
// Incremental increase in the Coverage and ClassDef table size
// (worst case) if all glyphs associated with 'klass' were added.
unsigned incremental_class_def_size (unsigned klass) const
{
// ClassDef takes 6 bytes per range
unsigned class_def_2_size = 6 * num_ranges_per_class.get (klass);
if (gids_consecutive)
{
// ClassDef1 takes 2 bytes per glyph, but only can be used
// when gids are consecutive.
return hb_min (2 * glyphs_per_class.get (klass).get_population (), class_def_2_size);
}
return class_def_2_size;
}
bool in_error ()
{
if (num_ranges_per_class.in_error ()) return true;
if (glyphs_per_class.in_error ()) return true;
for (const hb_set_t& s : glyphs_per_class.values ())
{
if (s.in_error ()) return true;
}
return false;
}
private:
bool gids_consecutive;
hb_hashmap_t<unsigned, unsigned> num_ranges_per_class;
hb_hashmap_t<unsigned, hb_set_t> glyphs_per_class;
};
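/* Usage sketch (ours, not upstream): the worst-case size cost of carrying one
 * class's glyphs into a split subtable is the sum of the coverage and
 * class-def deltas the estimator reports. The helper name is illustrative. */
template<typename It>
static inline unsigned
_example_class_cost (It glyph_and_class, unsigned klass)
{
  class_def_size_estimator_t estimator (glyph_and_class);
  if (estimator.in_error ()) return (unsigned) -1;  // assumption: signal "unknown" on error
  return estimator.incremental_coverage_size (klass)
       + estimator.incremental_class_def_size (klass);
}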
}
#endif // GRAPH_CLASSDEF_GRAPH_HH


@ -0,0 +1,152 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../OT/Layout/Common/Coverage.hh"
#ifndef GRAPH_COVERAGE_GRAPH_HH
#define GRAPH_COVERAGE_GRAPH_HH
namespace graph {
struct CoverageFormat1 : public OT::Layout::Common::CoverageFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
return vertex_len >= min_size + glyphArray.get_size () - glyphArray.len.get_size ();
}
};
struct CoverageFormat2 : public OT::Layout::Common::CoverageFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
}
};
struct Coverage : public OT::Layout::Common::Coverage
{
static Coverage* clone_coverage (gsubgpos_graph_context_t& c,
unsigned coverage_id,
unsigned new_parent_id,
unsigned link_position,
unsigned start, unsigned end)
{
unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
auto& coverage_v = c.graph.vertices_[coverage_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return nullptr;
auto new_coverage =
+ hb_zip (coverage_table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second >= start && p.second < end;
})
| hb_map_retains_sorting (hb_first)
;
return add_coverage (c, new_parent_id, link_position, new_coverage, coverage_size);
}
template<typename It>
static Coverage* add_coverage (gsubgpos_graph_context_t& c,
unsigned parent_id,
unsigned link_position,
It glyphs,
unsigned max_size)
{
unsigned coverage_prime_id = c.graph.new_node (nullptr, nullptr);
auto& coverage_prime_vertex = c.graph.vertices_[coverage_prime_id];
if (!make_coverage (c, glyphs, coverage_prime_id, max_size))
return nullptr;
auto* coverage_link = c.graph.vertices_[parent_id].obj.real_links.push ();
coverage_link->width = SmallTypes::size;
coverage_link->objidx = coverage_prime_id;
coverage_link->position = link_position;
coverage_prime_vertex.parents.push (parent_id);
return (Coverage*) coverage_prime_vertex.obj.head;
}
template<typename It>
static bool make_coverage (gsubgpos_graph_context_t& c,
It glyphs,
unsigned dest_obj,
unsigned max_size)
{
char* buffer = (char*) hb_calloc (1, max_size);
hb_serialize_context_t serializer (buffer, max_size);
OT::Layout::Common::Coverage_serialize (&serializer, glyphs);
serializer.end_serialize ();
if (serializer.in_error ())
{
hb_free (buffer);
return false;
}
hb_bytes_t coverage_copy = serializer.copy_bytes ();
c.add_buffer ((char *) coverage_copy.arrayZ); // Give ownership to the context; it will clean up the buffer.
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) coverage_copy.arrayZ;
obj.tail = obj.head + coverage_copy.length;
hb_free (buffer);
return true;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Layout::Common::Coverage::min_size) return false;
switch (u.format)
{
case 1: return ((CoverageFormat1*)this)->sanitize (vertex);
case 2: return ((CoverageFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
// Not currently supported
case 3:
case 4:
#endif
default: return false;
}
}
};
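/* Sketch (ours, not upstream): splitting an existing Coverage table in two.
 * clone_coverage keeps the glyphs whose coverage index i satisfies
 * start <= i < end and links the new table into new_parent_id at
 * link_position; a null return means the source failed sanitization or
 * serialization ran out of room. */
static inline bool
_example_take_first_half (gsubgpos_graph_context_t& c,
                          unsigned coverage_id,
                          unsigned new_parent_id,
                          unsigned link_position,
                          unsigned glyph_count)
{
  return Coverage::clone_coverage (c, coverage_id, new_parent_id,
                                   link_position, 0, glyph_count / 2) != nullptr;
}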
}
#endif // GRAPH_COVERAGE_GRAPH_HH


@ -24,6 +24,10 @@
* Google Author(s): Garret Rieger
*/
#include "../hb-set.hh"
#include "../hb-priority-queue.hh"
#include "../hb-serialize.hh"
#ifndef GRAPH_GRAPH_HH
#define GRAPH_GRAPH_HH
@ -45,6 +49,95 @@ struct graph_t
unsigned end = 0;
unsigned priority = 0;
bool link_positions_valid (unsigned num_objects, bool removed_nil)
{
hb_set_t assigned_bytes;
for (const auto& l : obj.real_links)
{
if (l.objidx >= num_objects
|| (removed_nil && !l.objidx))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Invalid graph. Invalid object index.");
return false;
}
unsigned start = l.position;
unsigned end = start + l.width - 1;
if (unlikely (l.width < 2 || l.width > 4))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Invalid graph. Invalid link width.");
return false;
}
if (unlikely (end >= table_size ()))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Invalid graph. Link position is out of bounds.");
return false;
}
if (unlikely (assigned_bytes.intersects (start, end)))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Invalid graph. Found offsets whose positions overlap.");
return false;
}
assigned_bytes.add_range (start, end);
}
return !assigned_bytes.in_error ();
}
void normalize ()
{
obj.real_links.qsort ();
for (auto& l : obj.real_links)
{
for (unsigned i = 0; i < l.width; i++)
{
obj.head[l.position + i] = 0;
}
}
}
bool equals (const vertex_t& other,
const graph_t& graph,
const graph_t& other_graph,
unsigned depth) const
{
if (!(as_bytes () == other.as_bytes ()))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"vertex [%lu] bytes != [%lu] bytes, depth = %u",
(unsigned long) table_size (),
(unsigned long) other.table_size (),
depth);
auto a = as_bytes ();
auto b = other.as_bytes ();
while (a || b)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" 0x%x %s 0x%x", (unsigned) *a, (*a == *b) ? "==" : "!=", (unsigned) *b);
a++;
b++;
}
return false;
}
return links_equal (obj.real_links, other.obj.real_links, graph, other_graph, depth);
}
hb_bytes_t as_bytes () const
{
return hb_bytes_t (obj.head, table_size ());
}
friend void swap (vertex_t& a, vertex_t& b)
{
hb_swap (a.obj, b.obj);
@ -56,6 +149,18 @@ struct graph_t
hb_swap (a.priority, b.priority);
}
hb_hashmap_t<unsigned, unsigned>
position_to_index_map () const
{
hb_hashmap_t<unsigned, unsigned> result;
for (const auto& l : obj.real_links) {
result.set (l.position, l.objidx);
}
return result;
}
bool is_shared () const
{
return parents.length > 1;
@ -71,11 +176,27 @@ struct graph_t
for (unsigned i = 0; i < parents.length; i++)
{
if (parents[i] != parent_index) continue;
parents.remove_unordered (i);
break;
}
}
void remove_real_link (unsigned child_index, const void* offset)
{
for (unsigned i = 0; i < obj.real_links.length; i++)
{
auto& link = obj.real_links.arrayZ[i];
if (link.objidx != child_index)
continue;
if ((obj.head + link.position) != offset)
continue;
obj.real_links.remove_unordered (i);
return;
}
}
void remap_parents (const hb_vector_t<unsigned>& id_map)
{
for (unsigned i = 0; i < parents.length; i++)
@ -107,6 +228,10 @@ struct graph_t
return priority >= 3;
}
size_t table_size () const {
return obj.tail - obj.head;
}
int64_t modified_distance (unsigned order) const
{
// TODO(garretrieger): once priority is high enough, should try
@ -131,6 +256,57 @@ struct graph_t
return -table_size;
}
private:
bool links_equal (const hb_vector_t<hb_serialize_context_t::object_t::link_t>& this_links,
const hb_vector_t<hb_serialize_context_t::object_t::link_t>& other_links,
const graph_t& graph,
const graph_t& other_graph,
unsigned depth) const
{
auto a = this_links.iter ();
auto b = other_links.iter ();
while (a && b)
{
const auto& link_a = *a;
const auto& link_b = *b;
if (link_a.width != link_b.width ||
link_a.is_signed != link_b.is_signed ||
link_a.whence != link_b.whence ||
link_a.position != link_b.position ||
link_a.bias != link_b.bias)
return false;
if (!graph.vertices_[link_a.objidx].equals (
other_graph.vertices_[link_b.objidx], graph, other_graph, depth + 1))
return false;
a++;
b++;
}
if (bool (a) != bool (b))
return false;
return true;
}
};
template <typename T>
struct vertex_and_table_t
{
vertex_and_table_t () : index (0), vertex (nullptr), table (nullptr)
{}
unsigned index;
vertex_t* vertex;
T* table;
operator bool () {
return table && vertex;
}
};
/*
@ -145,7 +321,8 @@ struct graph_t
: parents_invalid (true),
distance_invalid (true),
positions_invalid (true),
successful (true),
buffers ()
{
num_roots_for_space_.push (1);
bool removed_nil = false;
@ -153,8 +330,6 @@ struct graph_t
vertices_scratch_.alloc (objects.length);
for (unsigned i = 0; i < objects.length; i++)
{
// If this graph came from a serialization buffer object 0 is the
// nil object. We don't need it for our purposes here so drop it.
if (i == 0 && !objects[i])
@ -166,6 +341,9 @@ struct graph_t
vertex_t* v = vertices_.push ();
if (check_success (!vertices_.in_error ()))
v->obj = *objects[i];
check_success (v->link_positions_valid (objects.length, removed_nil));
if (!removed_nil) continue;
// Fix indices to account for removed nil object.
for (auto& l : v->obj.all_links_writer ()) {
@ -177,6 +355,20 @@ struct graph_t
~graph_t ()
{
vertices_.fini ();
for (char* b : buffers)
hb_free (b);
}
bool operator== (const graph_t& other) const
{
return root ().equals (other.root (), *this, other, 0);
}
// Sorts links of all objects in a consistent manner and zeroes all offsets.
void normalize ()
{
for (auto& v : vertices_.writer ())
v.normalize ();
}
bool in_error () const
@ -199,11 +391,43 @@ struct graph_t
return vertices_.length - 1;
}
const hb_serialize_context_t::object_t& object (unsigned i) const
{
return vertices_[i].obj;
}
void add_buffer (char* buffer)
{
buffers.push (buffer);
}
/*
* Adds a 16 bit link from parent_id to child_id
*/
template<typename T>
void add_link (T* offset,
unsigned parent_id,
unsigned child_id)
{
auto& v = vertices_[parent_id];
auto* link = v.obj.real_links.push ();
link->width = 2;
link->objidx = child_id;
link->position = (char*) offset - (char*) v.obj.head;
vertices_[child_id].parents.push (parent_id);
}
/*
* Generates a new topological sorting of graph ordered by the shortest
* distance to each node if positions are marked as invalid.
*/
void sort_shortest_distance_if_needed ()
{
if (!positions_invalid) return;
sort_shortest_distance ();
}
/*
* Generates a new topological sorting of graph ordered by the shortest
* distance to each node.
@ -239,6 +463,13 @@ struct graph_t
hb_swap (sorted_graph[new_id], vertices_[next_id]);
const vertex_t& next = sorted_graph[new_id];
if (unlikely (!check_success(new_id >= 0))) {
// We are out of ids, which means we've visited a node more than once;
// this graph contains a cycle, which is not allowed.
DEBUG_MSG (SUBSET_REPACK, nullptr, "Invalid graph. Contains cycle.");
return;
}
id_map[next_id] = new_id--;
for (const auto& link : next.obj.all_links ()) {
@ -256,44 +487,152 @@ struct graph_t
check_success (!queue.in_error ());
check_success (!sorted_graph.in_error ());
remap_all_obj_indices (id_map, &sorted_graph);
hb_swap (vertices_, sorted_graph);
if (!check_success (new_id == -1))
print_orphaned_nodes ();
}
/*
* Finds the set of nodes (placed into roots) that should be assigned unique spaces.
* More specifically this looks for the top most 24 bit or 32 bit links in the graph.
* Some special casing is done that is specific to the layout of GSUB/GPOS tables.
*/
void find_space_roots (hb_set_t& visited, hb_set_t& roots)
{
int root_index = (int) root_idx ();
for (int i = root_index; i >= 0; i--)
{
if (visited.has (i)) continue;
// Only real links can form 32 bit spaces
for (auto& l : vertices_[i].obj.real_links)
{
if (l.is_signed || l.width < 3)
continue;
if (i == root_index && l.width == 3)
// Ignore 24bit links from the root node, this skips past the single 24bit
// pointer to the lookup list.
continue;
if (l.width == 3)
{
// A 24bit offset forms a root, unless there are 32bit offsets somewhere
// in its subgraph; in that case those become the roots instead. This is to make sure
// that extension subtables beneath a 24bit lookup become the spaces instead
// of the offset to the lookup.
hb_set_t sub_roots;
find_32bit_roots (l.objidx, sub_roots);
if (sub_roots) {
for (unsigned sub_root_idx : sub_roots) {
roots.add (sub_root_idx);
find_subgraph (sub_root_idx, visited);
}
continue;
}
}
roots.add (l.objidx);
find_subgraph (l.objidx, visited);
}
}
}
template <typename T, typename ...Ts>
vertex_and_table_t<T> as_table (unsigned parent, const void* offset, Ts... ds)
{
return as_table_from_index<T> (index_for_offset (parent, offset), std::forward<Ts>(ds)...);
}
template <typename T, typename ...Ts>
vertex_and_table_t<T> as_mutable_table (unsigned parent, const void* offset, Ts... ds)
{
return as_table_from_index<T> (mutable_index_for_offset (parent, offset), std::forward<Ts>(ds)...);
}
template <typename T, typename ...Ts>
vertex_and_table_t<T> as_table_from_index (unsigned index, Ts... ds)
{
if (index >= vertices_.length)
return vertex_and_table_t<T> ();
vertex_and_table_t<T> r;
r.vertex = &vertices_[index];
r.table = (T*) r.vertex->obj.head;
r.index = index;
if (!r.table)
return vertex_and_table_t<T> ();
if (!r.table->sanitize (*(r.vertex), std::forward<Ts>(ds)...))
return vertex_and_table_t<T> ();
return r;
}
// Finds the object id of the object pointed to by the offset at 'offset'
// within object[node_idx].
unsigned index_for_offset (unsigned node_idx, const void* offset) const
{
const auto& node = object (node_idx);
if (offset < node.head || offset >= node.tail) return -1;
unsigned length = node.real_links.length;
for (unsigned i = 0; i < length; i++)
{
// Use direct access for increased performance; this is a hot method.
const auto& link = node.real_links.arrayZ[i];
if (offset != node.head + link.position)
continue;
return link.objidx;
}
return -1;
}
// Finds the object id of the object pointed to by the offset at 'offset'
// within object[node_idx]. Ensures that the returned object is safe to mutate.
// That is, if the original child object is shared by parents other than node_idx
// it will be duplicated and the duplicate will be returned instead.
unsigned mutable_index_for_offset (unsigned node_idx, const void* offset)
{
unsigned child_idx = index_for_offset (node_idx, offset);
auto& child = vertices_[child_idx];
for (unsigned p : child.parents)
{
if (p != node_idx) {
return duplicate (node_idx, child_idx);
}
}
return child_idx;
}
/*
* Assign unique space numbers to each connected subgraph of 24 bit and/or 32 bit offset(s).
* Currently, this is implemented specifically tailored to the structure of a GPOS/GSUB
* (including with 24bit offsets) table.
*/
bool assign_spaces ()
{
update_parents ();
hb_set_t visited;
hb_set_t roots;
find_space_roots (visited, roots);
// Mark everything not in the subgraphs of the roots as visited. This prevents
// subgraphs from being connected via nodes not in those subgraphs.
visited.invert ();
if (!roots) return false;
while (roots)
{
uint32_t next = HB_SET_VALUE_INVALID;
if (unlikely (!check_success (!roots.in_error ()))) break;
if (!roots.next (&next)) break;
@ -361,6 +700,9 @@ struct graph_t
}
}
if (in_error ())
return false;
if (!made_changes)
return false;
@ -374,8 +716,8 @@ struct graph_t
auto new_subgraph =
+ subgraph.keys ()
| hb_map([&] (uint32_t node_idx) {
const uint32_t *v;
if (index_map.has (node_idx, &v)) return *v;
return node_idx;
})
@ -385,10 +727,10 @@ struct graph_t
remap_obj_indices (index_map, parents.iter (), true);
// Update roots set with new indices as needed.
uint32_t next = HB_SET_VALUE_INVALID;
while (roots.next (&next))
{
const uint32_t *v;
if (index_map.has (next, &v))
{
roots.del (next);
@ -403,7 +745,7 @@ struct graph_t
{
for (const auto& link : vertices_[node_idx].obj.all_links ())
{
const uint32_t *v;
if (subgraph.has (link.objidx, &v))
{
subgraph.set (link.objidx, *v + 1);
@ -422,6 +764,68 @@ struct graph_t
find_subgraph (link.objidx, subgraph);
}
size_t find_subgraph_size (unsigned node_idx, hb_set_t& subgraph, unsigned max_depth = -1)
{
if (subgraph.has (node_idx)) return 0;
subgraph.add (node_idx);
const auto& o = vertices_[node_idx].obj;
size_t size = o.tail - o.head;
if (max_depth == 0)
return size;
for (const auto& link : o.all_links ())
size += find_subgraph_size (link.objidx, subgraph, max_depth - 1);
return size;
}
/*
* Finds the topmost children of 32bit offsets in the subgraph starting
* at node_idx. Found indices are placed into 'found'.
*/
void find_32bit_roots (unsigned node_idx, hb_set_t& found)
{
for (const auto& link : vertices_[node_idx].obj.all_links ())
{
if (!link.is_signed && link.width == 4) {
found.add (link.objidx);
continue;
}
find_32bit_roots (link.objidx, found);
}
}
/*
* Moves the child of old_parent_idx pointed to by old_offset to a new
* vertex at the new_offset.
*/
template<typename O>
void move_child (unsigned old_parent_idx,
const O* old_offset,
unsigned new_parent_idx,
const O* new_offset)
{
distance_invalid = true;
positions_invalid = true;
auto& old_v = vertices_[old_parent_idx];
auto& new_v = vertices_[new_parent_idx];
unsigned child_id = index_for_offset (old_parent_idx,
old_offset);
auto* new_link = new_v.obj.real_links.push ();
new_link->width = O::static_size;
new_link->objidx = child_id;
new_link->position = (const char*) new_offset - (const char*) new_v.obj.head;
auto& child = vertices_[child_id];
child.parents.push (new_parent_idx);
old_v.remove_real_link (child_id, old_offset);
child.remove_parent (old_parent_idx);
}
/*
* duplicates all nodes in the subgraph reachable from node_idx. Does not re-assign
* links. index_map is updated with mappings from old id to new id. If a duplication has already
@ -432,7 +836,11 @@ struct graph_t
if (index_map.has (node_idx))
return;
unsigned clone_idx = duplicate (node_idx);
if (!check_success (clone_idx != (unsigned) -1))
return;
index_map.set (node_idx, clone_idx);
for (const auto& l : object (node_idx).all_links ()) {
duplicate_subgraph (l.objidx, index_map);
}
@ -490,7 +898,20 @@ struct graph_t
* parent to the clone. The copy is a shallow copy, objects
* linked from child are not duplicated.
*/
unsigned duplicate_if_shared (unsigned parent_idx, unsigned child_idx)
{
unsigned new_idx = duplicate (parent_idx, child_idx);
if (new_idx == (unsigned) -1) return child_idx;
return new_idx;
}
/*
* Creates a copy of child and re-assigns the link from
* parent to the clone. The copy is a shallow copy, objects
* linked from child are not duplicated.
*/
unsigned duplicate (unsigned parent_idx, unsigned child_idx)
{
update_parents ();
@ -504,12 +925,12 @@ struct graph_t
{
// Can't duplicate this node, doing so would orphan the original one as all remaining links
// to child are from parent.
DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %u => %u",
parent_idx, child_idx);
return -1;
}
DEBUG_MSG (SUBSET_REPACK, nullptr, " Duplicating %u => %u",
parent_idx, child_idx);
unsigned clone_idx = duplicate (child_idx);
@ -526,7 +947,40 @@ struct graph_t
reassign_link (l, parent_idx, clone_idx);
}
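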
return clone_idx;
}
/*
* Adds a new node to the graph, not connected to anything.
*/
unsigned new_node (char* head, char* tail)
{
positions_invalid = true;
distance_invalid = true;
auto* clone = vertices_.push ();
if (vertices_.in_error ()) {
return -1;
}
clone->obj.head = head;
clone->obj.tail = tail;
clone->distance = 0;
clone->space = 0;
unsigned clone_idx = vertices_.length - 2;
// The last object is the root of the graph, so swap back the root to the end.
// The root's obj idx does change; however, since it's the root, nothing else refers to it.
// All other obj idxs will be unaffected.
hb_swap (vertices_[vertices_.length - 2], *clone);
// Since the root moved, update the parents arrays of all children on the root.
for (const auto& l : root ().obj.all_links ())
vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ());
return clone_idx;
}
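// Note (ours): push () appends the new vertex after the root, and the swap
// moves the root back into the last slot, since root_idx () is defined as
// vertices_.length - 1. Only the root's own index changes, and nothing links
// to the root, so just the children's parents arrays need remapping.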
/*
@ -534,7 +988,7 @@ struct graph_t
*/
bool raise_childrens_priority (unsigned parent_idx)
{
DEBUG_MSG (SUBSET_REPACK, nullptr, " Raising priority of all children of %u",
parent_idx);
// This operation doesn't change ordering until a sort is run, so no need
// to invalidate positions. It does not change graph structure so no need
@ -546,6 +1000,72 @@ struct graph_t
return made_change;
}
bool is_fully_connected ()
{
update_parents();
if (root().parents)
// Root cannot have parents.
return false;
for (unsigned i = 0; i < root_idx (); i++)
{
if (!vertices_[i].parents)
return false;
}
return true;
}
#if 0
/*
* Saves the current graph to a packed binary format which the repacker fuzzer takes
* as a seed.
*/
void save_fuzzer_seed (hb_tag_t tag) const
{
FILE* f = fopen ("./repacker_fuzzer_seed", "w");
fwrite ((void*) &tag, sizeof (tag), 1, f);
uint16_t num_objects = vertices_.length;
fwrite ((void*) &num_objects, sizeof (num_objects), 1, f);
for (const auto& v : vertices_)
{
uint16_t blob_size = v.table_size ();
fwrite ((void*) &blob_size, sizeof (blob_size), 1, f);
fwrite ((const void*) v.obj.head, blob_size, 1, f);
}
uint16_t link_count = 0;
for (const auto& v : vertices_)
link_count += v.obj.real_links.length;
fwrite ((void*) &link_count, sizeof (link_count), 1, f);
typedef struct
{
uint16_t parent;
uint16_t child;
uint16_t position;
uint8_t width;
} link_t;
for (unsigned i = 0; i < vertices_.length; i++)
{
for (const auto& l : vertices_[i].obj.real_links)
{
link_t link {
(uint16_t) i, (uint16_t) l.objidx,
(uint16_t) l.position, (uint8_t) l.width
};
fwrite ((void*) &link, sizeof (link), 1, f);
}
}
fclose (f);
}
#endif
void print_orphaned_nodes ()
{
if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
@ -554,6 +1074,10 @@ struct graph_t
parents_invalid = true;
update_parents();
if (root().parents) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Root node has incoming edges.");
}
for (unsigned i = 0; i < root_idx (); i++)
{
const auto& v = vertices_[i];
@ -622,7 +1146,7 @@ struct graph_t
private:
/*
* Returns the number of incoming edges that are 24 or 32 bits wide.
*/
unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const
{
@ -636,7 +1160,9 @@ struct graph_t
// Only real links can be wide
for (const auto& l : vertices_[p].obj.real_links)
{
if (l.objidx == node_idx
&& (l.width == 3 || l.width == 4)
&& !l.is_signed)
{
count++;
parents.add (p);
@ -668,6 +1194,11 @@ struct graph_t
}
}
for (unsigned i = 0; i < vertices_.length; i++)
// parents arrays must be accurate or downstream operations like cycle detection
// and sorting won't work correctly.
check_success (!vertices_[i].parents.in_error ());
parents_invalid = false;
}
@ -786,7 +1317,7 @@ struct graph_t
{
for (auto& link : vertices_[i].obj.all_links_writer ())
{
const uint32_t *v;
if (!id_map.has (link.objidx, &v)) continue;
if (only_wide && !(link.width == 4 && !link.is_signed)) continue;
@ -853,6 +1384,7 @@ struct graph_t
bool positions_invalid;
bool successful;
hb_vector_t<unsigned> num_roots_for_space_;
hb_vector_t<char*> buffers;
};
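/* Flow sketch (ours, not an upstream API): the order of operations a repack
 * pass might apply to a graph_t built from a serializer's object list. */
inline bool
_example_repack_pass (graph_t& graph)
{
  if (!graph.is_fully_connected ()) return false;  // orphaned nodes: give up early
  graph.assign_spaces ();            // isolate 24/32 bit offset subgraphs
  graph.sort_shortest_distance ();   // re-sort to shrink offset distances
  return !graph.in_error ();
}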
}


@ -0,0 +1,70 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "gsubgpos-graph.hh"
namespace graph {
gsubgpos_graph_context_t::gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_)
: table_tag (table_tag_),
graph (graph_),
lookup_list_index (0),
lookups ()
{
if (table_tag_ != HB_OT_TAG_GPOS
&& table_tag_ != HB_OT_TAG_GSUB)
return;
GSTAR* gstar = graph::GSTAR::graph_to_gstar (graph_);
if (gstar) {
gstar->find_lookups (graph, lookups);
lookup_list_index = gstar->get_lookup_list_index (graph_);
}
}
unsigned gsubgpos_graph_context_t::create_node (unsigned size)
{
char* buffer = (char*) hb_calloc (1, size);
if (!buffer)
return -1;
add_buffer (buffer);
return graph.new_node (buffer, buffer + size);
}
unsigned gsubgpos_graph_context_t::num_non_ext_subtables () {
unsigned count = 0;
for (auto l : lookups.values ())
{
if (l->is_extension (table_tag)) continue;
count += l->number_of_subtables ();
}
return count;
}
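/* Note (ours): create_node () allocates a zeroed buffer, hands ownership to
 * the graph through add_buffer () (graph_t's destructor frees every
 * registered buffer), and returns the new vertex id; (unsigned) -1 signals
 * an allocation failure the caller must check for. */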
}


@ -0,0 +1,61 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#ifndef GRAPH_GSUBGPOS_CONTEXT_HH
#define GRAPH_GSUBGPOS_CONTEXT_HH
namespace graph {
struct Lookup;
struct gsubgpos_graph_context_t
{
hb_tag_t table_tag;
graph_t& graph;
unsigned lookup_list_index;
hb_hashmap_t<unsigned, graph::Lookup*> lookups;
HB_INTERNAL gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_);
HB_INTERNAL unsigned create_node (unsigned size);
void add_buffer (char* buffer)
{
graph.add_buffer (buffer);
}
private:
HB_INTERNAL unsigned num_non_ext_subtables ();
};
}
#endif // GRAPH_GSUBGPOS_CONTEXT_HH


@ -0,0 +1,414 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#include "../OT/Layout/GSUB/ExtensionSubst.hh"
#include "gsubgpos-context.hh"
#include "pairpos-graph.hh"
#include "markbasepos-graph.hh"
#ifndef GRAPH_GSUBGPOS_GRAPH_HH
#define GRAPH_GSUBGPOS_GRAPH_HH
namespace graph {
struct Lookup;
template<typename T>
struct ExtensionFormat1 : public OT::ExtensionFormat1<T>
{
void reset(unsigned type)
{
this->format = 1;
this->extensionLookupType = type;
this->extensionOffset = 0;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
return vertex_len >= OT::ExtensionFormat1<T>::static_size;
}
unsigned get_lookup_type () const
{
return this->extensionLookupType;
}
unsigned get_subtable_index (graph_t& graph, unsigned this_index) const
{
return graph.index_for_offset (this_index, &this->extensionOffset);
}
};
struct Lookup : public OT::Lookup
{
unsigned number_of_subtables () const
{
return subTable.len;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Lookup::min_size) return false;
return vertex_len >= this->get_size ();
}
bool is_extension (hb_tag_t table_tag) const
{
return lookupType == extension_type (table_tag);
}
bool make_extension (gsubgpos_graph_context_t& c,
unsigned this_index)
{
unsigned type = lookupType;
unsigned ext_type = extension_type (c.table_tag);
if (!ext_type || is_extension (c.table_tag))
{
// NOOP
return true;
}
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Promoting lookup type %u (obj %u) to extension.",
type,
this_index);
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
if (!make_subtable_extension (c,
this_index,
subtable_index))
return false;
}
lookupType = ext_type;
return true;
}
bool split_subtables_if_needed (gsubgpos_graph_context_t& c,
unsigned this_index)
{
unsigned type = lookupType;
bool is_ext = is_extension (c.table_tag);
if (c.table_tag != HB_OT_TAG_GPOS)
return true;
if (!is_ext &&
type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair &&
type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase)
return true;
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>> all_new_subtables;
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
unsigned parent_index = this_index;
if (is_ext) {
unsigned ext_subtable_index = subtable_index;
parent_index = ext_subtable_index;
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*)
c.graph.object (ext_subtable_index).head;
if (!extension || !extension->sanitize (c.graph.vertices_[ext_subtable_index]))
continue;
subtable_index = extension->get_subtable_index (c.graph, ext_subtable_index);
type = extension->get_lookup_type ();
if (type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair
&& type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase)
continue;
}
hb_vector_t<unsigned> new_sub_tables;
switch (type)
{
case 2:
new_sub_tables = split_subtable<PairPos> (c, parent_index, subtable_index); break;
case 4:
new_sub_tables = split_subtable<MarkBasePos> (c, parent_index, subtable_index); break;
default:
break;
}
if (new_sub_tables.in_error ()) return false;
if (!new_sub_tables) continue;
hb_pair_t<unsigned, hb_vector_t<unsigned>>* entry = all_new_subtables.push ();
entry->first = i;
entry->second = std::move (new_sub_tables);
}
if (all_new_subtables) {
add_sub_tables (c, this_index, type, all_new_subtables);
}
return true;
}
template<typename T>
hb_vector_t<unsigned> split_subtable (gsubgpos_graph_context_t& c,
unsigned parent_idx,
unsigned objidx)
{
T* sub_table = (T*) c.graph.object (objidx).head;
if (!sub_table || !sub_table->sanitize (c.graph.vertices_[objidx]))
return hb_vector_t<unsigned> ();
return sub_table->split_subtables (c, parent_idx, objidx);
}
void add_sub_tables (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned type,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
bool is_ext = is_extension (c.table_tag);
auto& v = c.graph.vertices_[this_index];
fix_existing_subtable_links (c, this_index, subtable_ids);
unsigned new_subtable_count = 0;
for (const auto& p : subtable_ids)
new_subtable_count += p.second.length;
size_t new_size = v.table_size ()
+ new_subtable_count * OT::Offset16::static_size;
char* buffer = (char*) hb_calloc (1, new_size);
c.add_buffer (buffer);
hb_memcpy (buffer, v.obj.head, v.table_size());
v.obj.head = buffer;
v.obj.tail = buffer + new_size;
Lookup* new_lookup = (Lookup*) buffer;
unsigned shift = 0;
new_lookup->subTable.len = subTable.len + new_subtable_count;
for (const auto& p : subtable_ids)
{
unsigned offset_index = p.first + shift + 1;
shift += p.second.length;
for (unsigned subtable_id : p.second)
{
if (is_ext)
{
unsigned ext_id = create_extension_subtable (c, subtable_id, type);
c.graph.vertices_[subtable_id].parents.push (ext_id);
subtable_id = ext_id;
}
auto* link = v.obj.real_links.push ();
link->width = 2;
link->objidx = subtable_id;
link->position = (char*) &new_lookup->subTable[offset_index++] -
(char*) new_lookup;
c.graph.vertices_[subtable_id].parents.push (this_index);
}
}
// Repacker sort order depends on link order, which we've messed up so resort it.
v.obj.real_links.qsort ();
// The head location of the lookup has changed, invalidating the lookups map entry
// in the context. Update the map.
c.lookups.set (this_index, new_lookup);
}
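// Indexing note (ours): for each entry (i, new_ids) the fresh offsets are
// spliced in immediately after original offset i, which by now sits at index
// i + shift because of earlier insertions; hence offset_index starts at
// i + shift + 1, and shift then grows by new_ids.length so later entries
// land after everything inserted so far.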
void fix_existing_subtable_links (gsubgpos_graph_context_t& c,
unsigned this_index,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
auto& v = c.graph.vertices_[this_index];
Lookup* lookup = (Lookup*) v.obj.head;
unsigned shift = 0;
for (const auto& p : subtable_ids)
{
unsigned insert_index = p.first + shift;
unsigned pos_offset = p.second.length * OT::Offset16::static_size;
unsigned insert_offset = (char*) &lookup->subTable[insert_index] - (char*) lookup;
shift += p.second.length;
for (auto& l : v.obj.all_links_writer ())
{
if (l.position > insert_offset) l.position += pos_offset;
}
}
}
unsigned create_extension_subtable (gsubgpos_graph_context_t& c,
unsigned subtable_index,
unsigned type)
{
unsigned extension_size = OT::ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>::static_size;
unsigned ext_index = c.create_node (extension_size);
if (ext_index == (unsigned) -1)
return -1;
auto& ext_vertex = c.graph.vertices_[ext_index];
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*) ext_vertex.obj.head;
extension->reset (type);
// Make extension point at the subtable.
auto* l = ext_vertex.obj.real_links.push ();
l->width = 4;
l->objidx = subtable_index;
l->position = 4;
return ext_index;
}
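// Layout note (ours): position 4 is the byte offset of extensionOffset inside
// ExtensionFormat1 (format and extensionLookupType are 2 bytes each), and the
// link is 4 bytes wide because the subtable is reached through an Offset32.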
bool make_subtable_extension (gsubgpos_graph_context_t& c,
unsigned lookup_index,
unsigned subtable_index)
{
unsigned type = lookupType;
unsigned ext_index = create_extension_subtable(c, subtable_index, type);
if (ext_index == (unsigned) -1)
return false;
auto& lookup_vertex = c.graph.vertices_[lookup_index];
for (auto& l : lookup_vertex.obj.real_links.writer ())
{
if (l.objidx == subtable_index)
// Change lookup to point at the extension.
l.objidx = ext_index;
}
// Make extension point at the subtable.
auto& ext_vertex = c.graph.vertices_[ext_index];
auto& subtable_vertex = c.graph.vertices_[subtable_index];
ext_vertex.parents.push (lookup_index);
subtable_vertex.remap_parent (lookup_index, ext_index);
return true;
}
private:
unsigned extension_type (hb_tag_t table_tag) const
{
switch (table_tag)
{
case HB_OT_TAG_GPOS: return 9;
case HB_OT_TAG_GSUB: return 7;
default: return 0;
}
}
};
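/* Usage sketch (ours, not upstream): promoting every lookup discovered by
 * GSTAR to extension form, the way a repack pass does before resolving
 * offset overflows. The helper name is illustrative. */
inline bool
_example_promote_all_lookups (gsubgpos_graph_context_t& c)
{
  for (auto _ : c.lookups.iter ())
    if (!_.second->make_extension (c, _.first))
      return false;  // promotion failed (e.g. extension node allocation)
  return true;
}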
template <typename T>
struct LookupList : public OT::LookupList<T>
{
bool sanitize (const graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::LookupList<T>::min_size) return false;
return vertex_len >= OT::LookupList<T>::item_size * this->len;
}
};
struct GSTAR : public OT::GSUBGPOS
{
static GSTAR* graph_to_gstar (graph_t& graph)
{
const auto& r = graph.root ();
GSTAR* gstar = (GSTAR*) r.obj.head;
if (!gstar || !gstar->sanitize (r))
return nullptr;
return gstar;
}
const void* get_lookup_list_field_offset () const
{
switch (u.version.major) {
case 1: return u.version1.get_lookup_list_offset ();
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.get_lookup_list_offset ();
#endif
default: return 0;
}
}
bool sanitize (const graph_t::vertex_t& vertex)
{
int64_t len = vertex.obj.tail - vertex.obj.head;
if (len < OT::GSUBGPOS::min_size) return false;
return len >= get_size ();
}
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
switch (u.version.major) {
case 1: find_lookups<SmallTypes> (graph, lookups); break;
#ifndef HB_NO_BEYOND_64K
case 2: find_lookups<MediumTypes> (graph, lookups); break;
#endif
}
}
unsigned get_lookup_list_index (graph_t& graph)
{
return graph.index_for_offset (graph.root_idx (),
get_lookup_list_field_offset());
}
template<typename Types>
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
unsigned lookup_list_idx = get_lookup_list_index (graph);
const LookupList<Types>* lookupList =
(const LookupList<Types>*) graph.object (lookup_list_idx).head;
if (!lookupList || !lookupList->sanitize (graph.vertices_[lookup_list_idx]))
return;
for (unsigned i = 0; i < lookupList->len; i++)
{
unsigned lookup_idx = graph.index_for_offset (lookup_list_idx, &(lookupList->arrayZ[i]));
Lookup* lookup = (Lookup*) graph.object (lookup_idx).head;
if (!lookup || !lookup->sanitize (graph.vertices_[lookup_idx])) continue;
lookups.set (lookup_idx, lookup);
}
}
};
}
#endif /* GRAPH_GSUBGPOS_GRAPH_HH */


@ -0,0 +1,510 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_MARKBASEPOS_GRAPH_HH
#define GRAPH_MARKBASEPOS_GRAPH_HH
#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "../OT/Layout/GPOS/MarkBasePos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
namespace graph {
struct AnchorMatrix : public OT::Layout::GPOS_impl::AnchorMatrix
{
bool sanitize (graph_t::vertex_t& vertex, unsigned class_count) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < AnchorMatrix::min_size) return false;
return vertex_len >= AnchorMatrix::min_size +
OT::Offset16::static_size * class_count * this->rows;
}
bool shrink (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned old_class_count,
unsigned new_class_count)
{
if (new_class_count >= old_class_count) return false;
auto& o = c.graph.vertices_[this_index].obj;
unsigned base_count = rows;
o.tail = o.head +
AnchorMatrix::min_size +
OT::Offset16::static_size * base_count * new_class_count;
// Reposition links into the new indexing scheme.
for (auto& link : o.real_links.writer ())
{
unsigned index = (link.position - 2) / 2;
unsigned base = index / old_class_count;
unsigned klass = index % old_class_count;
if (klass >= new_class_count)
// should have already been removed
return false;
unsigned new_index = base * new_class_count + klass;
link.position = (char*) &(this->matrixZ[new_index]) - (char*) this;
}
return true;
}
unsigned clone (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned start,
unsigned end,
unsigned class_count)
{
unsigned base_count = rows;
unsigned new_class_count = end - start;
unsigned size = AnchorMatrix::min_size +
OT::Offset16::static_size * new_class_count * rows;
unsigned prime_id = c.create_node (size);
if (prime_id == (unsigned) -1) return -1;
AnchorMatrix* prime = (AnchorMatrix*) c.graph.object (prime_id).head;
prime->rows = base_count;
auto& o = c.graph.vertices_[this_index].obj;
int num_links = o.real_links.length;
for (int i = 0; i < num_links; i++)
{
const auto& link = o.real_links[i];
unsigned old_index = (link.position - 2) / OT::Offset16::static_size;
unsigned klass = old_index % class_count;
if (klass < start || klass >= end) continue;
unsigned base = old_index / class_count;
unsigned new_klass = klass - start;
unsigned new_index = base * new_class_count + new_klass;
unsigned child_idx = link.objidx;
c.graph.add_link (&(prime->matrixZ[new_index]),
prime_id,
child_idx);
auto& child = c.graph.vertices_[child_idx];
child.remove_parent (this_index);
o.real_links.remove_unordered (i);
num_links--;
i--;
}
return prime_id;
}
};
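/* Worked example (ours): with old_class_count = 4 and new_class_count = 2,
 * the anchor at (base 1, class 1) sits at matrix index 1*4 + 1 = 5, i.e. at
 * byte offset 2 + 5*2 = 12 (2 bytes of header, then Offset16 entries); after
 * shrink () it moves to index 1*2 + 1 = 3, byte offset 2 + 3*2 = 8. Anchors
 * of classes >= 2 must already have been removed. */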
struct MarkArray : public OT::Layout::GPOS_impl::MarkArray
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
unsigned min_size = MarkArray::min_size;
if (vertex_len < min_size) return false;
return vertex_len >= get_size ();
}
bool shrink (gsubgpos_graph_context_t& c,
const hb_hashmap_t<unsigned, unsigned>& mark_array_links,
unsigned this_index,
unsigned new_class_count)
{
auto& o = c.graph.vertices_[this_index].obj;
for (const auto& link : o.real_links)
c.graph.vertices_[link.objidx].remove_parent (this_index);
o.real_links.reset ();
unsigned new_index = 0;
for (const auto& record : this->iter ())
{
unsigned klass = record.klass;
if (klass >= new_class_count) continue;
(*this)[new_index].klass = klass;
unsigned position = (char*) &record.markAnchor - (char*) this;
unsigned* objidx;
if (!mark_array_links.has (position, &objidx))
{
new_index++;
continue;
}
c.graph.add_link (&(*this)[new_index].markAnchor, this_index, *objidx);
new_index++;
}
this->len = new_index;
o.tail = o.head + MarkArray::min_size +
OT::Layout::GPOS_impl::MarkRecord::static_size * new_index;
return true;
}
unsigned clone (gsubgpos_graph_context_t& c,
unsigned this_index,
const hb_hashmap_t<unsigned, unsigned>& pos_to_index,
hb_set_t& marks,
unsigned start_class)
{
unsigned size = MarkArray::min_size +
OT::Layout::GPOS_impl::MarkRecord::static_size *
marks.get_population ();
unsigned prime_id = c.create_node (size);
if (prime_id == (unsigned) -1) return -1;
MarkArray* prime = (MarkArray*) c.graph.object (prime_id).head;
prime->len = marks.get_population ();
unsigned i = 0;
for (hb_codepoint_t mark : marks)
{
(*prime)[i].klass = (*this)[mark].klass - start_class;
unsigned offset_pos = (char*) &((*this)[mark].markAnchor) - (char*) this;
unsigned* anchor_index;
if (pos_to_index.has (offset_pos, &anchor_index))
c.graph.move_child (this_index,
&((*this)[mark].markAnchor),
prime_id,
&((*prime)[i].markAnchor));
i++;
}
return prime_id;
}
};
struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
return vertex_len >= MarkBasePosFormat1::static_size;
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
const unsigned base_coverage_id = c.graph.index_for_offset (this_index, &baseCoverage);
const unsigned base_size =
OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size +
MarkArray::min_size +
AnchorMatrix::min_size +
c.graph.vertices_[base_coverage_id].table_size ();
hb_vector_t<class_info_t> class_to_info = get_class_info (c, this_index);
unsigned class_count = classCount;
auto base_array = c.graph.as_table<AnchorMatrix> (this_index,
&baseArray,
class_count);
if (!base_array) return hb_vector_t<unsigned> ();
unsigned base_count = base_array.table->rows;
unsigned partial_coverage_size = 4;
unsigned accumulated = base_size;
hb_vector_t<unsigned> split_points;
for (unsigned klass = 0; klass < class_count; klass++)
{
class_info_t& info = class_to_info[klass];
partial_coverage_size += OT::HBUINT16::static_size * info.marks.get_population ();
unsigned accumulated_delta =
OT::Layout::GPOS_impl::MarkRecord::static_size * info.marks.get_population () +
OT::Offset16::static_size * base_count;
for (unsigned objidx : info.child_indices)
accumulated_delta += c.graph.find_subgraph_size (objidx, visited);
accumulated += accumulated_delta;
unsigned total = accumulated + partial_coverage_size;
if (total >= (1 << 16))
{
split_points.push (klass);
accumulated = base_size + accumulated_delta;
partial_coverage_size = 4 + OT::HBUINT16::static_size * info.marks.get_population ();
visited.clear (); // node sharing isn't allowed between splits.
}
}
const unsigned mark_array_id = c.graph.index_for_offset (this_index, &markArray);
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
std::move (class_to_info),
c.graph.vertices_[mark_array_id].position_to_index_map (),
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
private:
struct class_info_t {
hb_set_t marks;
hb_vector_t<unsigned> child_indices;
};
struct split_context_t {
gsubgpos_graph_context_t& c;
MarkBasePosFormat1* thiz;
unsigned this_index;
hb_vector_t<class_info_t> class_to_info;
hb_hashmap_t<unsigned, unsigned> mark_array_links;
hb_set_t marks_for (unsigned start, unsigned end)
{
hb_set_t marks;
for (unsigned klass = start; klass < end; klass++)
{
+ class_to_info[klass].marks.iter ()
| hb_sink (marks)
;
}
return marks;
}
unsigned original_count ()
{
return thiz->classCount;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (*this, this->this_index, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (*this, this->this_index, count);
}
};
hb_vector_t<class_info_t> get_class_info (gsubgpos_graph_context_t& c,
unsigned this_index)
{
hb_vector_t<class_info_t> class_to_info;
unsigned class_count = classCount;
class_to_info.resize (class_count);
auto mark_array = c.graph.as_table<MarkArray> (this_index, &markArray);
if (!mark_array) return hb_vector_t<class_info_t> ();
unsigned mark_count = mark_array.table->len;
for (unsigned mark = 0; mark < mark_count; mark++)
{
unsigned klass = (*mark_array.table)[mark].get_class ();
class_to_info[klass].marks.add (mark);
}
for (const auto& link : mark_array.vertex->obj.real_links)
{
unsigned mark = (link.position - 2) /
OT::Layout::GPOS_impl::MarkRecord::static_size;
unsigned klass = (*mark_array.table)[mark].get_class ();
class_to_info[klass].child_indices.push (link.objidx);
}
unsigned base_array_id =
c.graph.index_for_offset (this_index, &baseArray);
auto& base_array_v = c.graph.vertices_[base_array_id];
for (const auto& link : base_array_v.obj.real_links)
{
unsigned index = (link.position - 2) / OT::Offset16::static_size;
unsigned klass = index % class_count;
class_to_info[klass].child_indices.push (link.objidx);
}
return class_to_info;
}
bool shrink (split_context_t& sc,
unsigned this_index,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking MarkBasePosFormat1 (%u) to [0, %u).",
this_index,
count);
unsigned old_count = classCount;
if (count >= old_count)
return true;
classCount = count;
auto mark_coverage = sc.c.graph.as_mutable_table<Coverage> (this_index,
&markCoverage);
if (!mark_coverage) return false;
hb_set_t marks = sc.marks_for (0, count);
auto new_coverage =
+ hb_enumerate (mark_coverage.table->iter ())
| hb_filter (marks, hb_first)
| hb_map_retains_sorting (hb_second)
;
if (!Coverage::make_coverage (sc.c, + new_coverage,
mark_coverage.index,
4 + 2 * marks.get_population ()))
return false;
auto base_array = sc.c.graph.as_mutable_table<AnchorMatrix> (this_index,
&baseArray,
old_count);
if (!base_array || !base_array.table->shrink (sc.c,
base_array.index,
old_count,
count))
return false;
auto mark_array = sc.c.graph.as_mutable_table<MarkArray> (this_index,
&markArray);
if (!mark_array || !mark_array.table->shrink (sc.c,
sc.mark_array_links,
mark_array.index,
count))
return false;
return true;
}
// Create a new MarkBasePos that has all of the data for classes from [start, end).
unsigned clone_range (split_context_t& sc,
unsigned this_index,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning MarkBasePosFormat1 (%u) range [%u, %u).", this_index, start, end);
graph_t& graph = sc.c.graph;
unsigned prime_size = OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::static_size;
unsigned prime_id = sc.c.create_node (prime_size);
if (prime_id == (unsigned) -1) return -1;
MarkBasePosFormat1* prime = (MarkBasePosFormat1*) graph.object (prime_id).head;
prime->format = this->format;
unsigned new_class_count = end - start;
prime->classCount = new_class_count;
unsigned base_coverage_id =
graph.index_for_offset (sc.this_index, &baseCoverage);
graph.add_link (&(prime->baseCoverage), prime_id, base_coverage_id);
graph.duplicate (prime_id, base_coverage_id);
auto mark_coverage = sc.c.graph.as_table<Coverage> (this_index,
&markCoverage);
if (!mark_coverage) return -1;
hb_set_t marks = sc.marks_for (start, end);
auto new_coverage =
+ hb_enumerate (mark_coverage.table->iter ())
| hb_filter (marks, hb_first)
| hb_map_retains_sorting (hb_second)
;
if (!Coverage::add_coverage (sc.c,
prime_id,
2,
+ new_coverage,
marks.get_population () * 2 + 4))
return -1;
auto mark_array =
graph.as_table <MarkArray> (sc.this_index, &markArray);
if (!mark_array) return -1;
unsigned new_mark_array =
mark_array.table->clone (sc.c,
mark_array.index,
sc.mark_array_links,
marks,
start);
graph.add_link (&(prime->markArray), prime_id, new_mark_array);
unsigned class_count = classCount;
auto base_array =
graph.as_table<AnchorMatrix> (sc.this_index, &baseArray, class_count);
if (!base_array) return -1;
unsigned new_base_array =
base_array.table->clone (sc.c,
base_array.index,
start, end, this->classCount);
graph.add_link (&(prime->baseArray), prime_id, new_base_array);
return prime_id;
}
};
struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
// Don't split 24bit MarkBasePos's.
#endif
default:
return hb_vector_t<unsigned> ();
}
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
switch (u.format) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
#endif
default:
// We don't handle format 3 and 4 here.
return false;
}
}
};
}
#endif // GRAPH_MARKBASEPOS_GRAPH_HH
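The scan in split_subtables above is the core of the whole splitter: per-class size deltas are accumulated, and a split point is recorded whenever the running total would no longer fit in a 16-bit offset. A minimal standalone sketch of that greedy scan (names illustrative, coverage bookkeeping omitted):
```
#include <vector>

// Greedy split-point selection under the 64KB (16-bit offset) budget.
// class_sizes[k] is the estimated payload class k contributes; a split at k
// means class k opens a fresh subtable.
std::vector<unsigned> pick_split_points (const std::vector<unsigned> &class_sizes,
                                         unsigned base_size)
{
  std::vector<unsigned> split_points;
  unsigned accumulated = base_size;
  for (unsigned klass = 0; klass < (unsigned) class_sizes.size (); klass++)
  {
    accumulated += class_sizes[klass];
    if (accumulated >= (1u << 16))
    {
      split_points.push_back (klass);
      // The trigger class moves into the new subtable, so carry its size over.
      accumulated = base_size + class_sizes[klass];
    }
  }
  return split_points;
}
```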

View File

@ -0,0 +1,647 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_PAIRPOS_GRAPH_HH
#define GRAPH_PAIRPOS_GRAPH_HH
#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "classdef-graph.hh"
#include "../OT/Layout/GPOS/PairPos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
namespace graph {
struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
return vertex_len >=
min_size + pairSet.get_size () - pairSet.len.get_size();
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
const unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
const unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
unsigned partial_coverage_size = 4;
unsigned accumulated = base_size;
hb_vector_t<unsigned> split_points;
for (unsigned i = 0; i < pairSet.len; i++)
{
unsigned pair_set_index = pair_set_graph_index (c, this_index, i);
unsigned accumulated_delta =
c.graph.find_subgraph_size (pair_set_index, visited) +
SmallTypes::size; // for PairSet offset.
partial_coverage_size += OT::HBUINT16::static_size;
accumulated += accumulated_delta;
unsigned total = accumulated + hb_min (partial_coverage_size, coverage_size);
if (total >= (1 << 16))
{
split_points.push (i);
accumulated = base_size + accumulated_delta;
partial_coverage_size = 6;
visited.clear (); // node sharing isn't allowed between splits.
}
}
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
private:
struct split_context_t {
gsubgpos_graph_context_t& c;
PairPosFormat1* thiz;
unsigned this_index;
unsigned original_count ()
{
return thiz->pairSet.len;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (this->c, this->this_index, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (this->c, this->this_index, count);
}
};
bool shrink (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking PairPosFormat1 (%u) to [0, %u).",
this_index,
count);
unsigned old_count = pairSet.len;
if (count >= old_count)
return true;
pairSet.len = count;
c.graph.vertices_[this_index].obj.tail -= (old_count - count) * SmallTypes::size;
auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
if (!coverage) return false;
unsigned coverage_size = coverage.vertex->table_size ();
auto new_coverage =
+ hb_zip (coverage.table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second < count;
})
| hb_map_retains_sorting (hb_first)
;
return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
}
// Create a new PairPos including PairSet's from start (inclusive) to end (exclusive).
// Returns object id of the new object.
unsigned clone_range (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning PairPosFormat1 (%u) range [%u, %u).", this_index, start, end);
unsigned num_pair_sets = end - start;
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size
+ num_pair_sets * SmallTypes::size;
unsigned pair_pos_prime_id = c.create_node (prime_size);
if (pair_pos_prime_id == (unsigned) -1) return -1;
PairPosFormat1* pair_pos_prime = (PairPosFormat1*) c.graph.object (pair_pos_prime_id).head;
pair_pos_prime->format = this->format;
pair_pos_prime->valueFormat[0] = this->valueFormat[0];
pair_pos_prime->valueFormat[1] = this->valueFormat[1];
pair_pos_prime->pairSet.len = num_pair_sets;
for (unsigned i = start; i < end; i++)
{
c.graph.move_child<> (this_index,
&pairSet[i],
pair_pos_prime_id,
&pair_pos_prime->pairSet[i - start]);
}
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
if (!Coverage::clone_coverage (c,
coverage_id,
pair_pos_prime_id,
2,
start, end))
return -1;
return pair_pos_prime_id;
}
unsigned pair_set_graph_index (gsubgpos_graph_context_t& c, unsigned this_index, unsigned i) const
{
return c.graph.index_for_offset (this_index, &pairSet[i]);
}
};
struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
size_t vertex_len = vertex.table_size ();
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
const unsigned class1_count = class1Count;
return vertex_len >=
min_size + class1_count * get_class1_record_size ();
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
const unsigned class_def_2_size = size_of (c, this_index, &classDef2);
const Coverage* coverage = get_coverage (c, this_index);
const ClassDef* class_def_1 = get_class_def_1 (c, this_index);
auto gid_and_class =
+ coverage->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1->get_class (gid));
})
;
class_def_size_estimator_t estimator (gid_and_class);
const unsigned class1_count = class1Count;
const unsigned class2_count = class2Count;
const unsigned class1_record_size = get_class1_record_size ();
const unsigned value_1_len = valueFormat1.get_len ();
const unsigned value_2_len = valueFormat2.get_len ();
const unsigned total_value_len = value_1_len + value_2_len;
unsigned accumulated = base_size;
unsigned coverage_size = 4;
unsigned class_def_1_size = 4;
unsigned max_coverage_size = coverage_size;
unsigned max_class_def_1_size = class_def_1_size;
hb_vector_t<unsigned> split_points;
hb_hashmap_t<unsigned, unsigned> device_tables = get_all_device_tables (c, this_index);
hb_vector_t<unsigned> format1_device_table_indices = valueFormat1.get_device_table_indices ();
hb_vector_t<unsigned> format2_device_table_indices = valueFormat2.get_device_table_indices ();
bool has_device_tables = bool(format1_device_table_indices) || bool(format2_device_table_indices);
hb_set_t visited;
for (unsigned i = 0; i < class1_count; i++)
{
unsigned accumulated_delta = class1_record_size;
coverage_size += estimator.incremental_coverage_size (i);
class_def_1_size += estimator.incremental_class_def_size (i);
max_coverage_size = hb_max (max_coverage_size, coverage_size);
max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size);
if (has_device_tables) {
for (unsigned j = 0; j < class2_count; j++)
{
unsigned value1_index = total_value_len * (class2_count * i + j);
unsigned value2_index = value1_index + value_1_len;
accumulated_delta += size_of_value_record_children (c,
device_tables,
format1_device_table_indices,
value1_index,
visited);
accumulated_delta += size_of_value_record_children (c,
device_tables,
format2_device_table_indices,
value2_index,
visited);
}
}
accumulated += accumulated_delta;
unsigned total = accumulated
+ coverage_size + class_def_1_size + class_def_2_size
// The largest object will pack last and can exceed the size limit.
- hb_max (hb_max (coverage_size, class_def_1_size), class_def_2_size);
if (total >= (1 << 16))
{
split_points.push (i);
// split does not include i, so add the size for i when we reset the size counters.
accumulated = base_size + accumulated_delta;
coverage_size = 4 + estimator.incremental_coverage_size (i);
class_def_1_size = 4 + estimator.incremental_class_def_size (i);
visited.clear (); // node sharing isn't allowed between splits.
}
}
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
class1_record_size,
total_value_len,
value_1_len,
value_2_len,
max_coverage_size,
max_class_def_1_size,
device_tables,
format1_device_table_indices,
format2_device_table_indices
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
private:
struct split_context_t
{
gsubgpos_graph_context_t& c;
PairPosFormat2* thiz;
unsigned this_index;
unsigned class1_record_size;
unsigned value_record_len;
unsigned value1_record_len;
unsigned value2_record_len;
unsigned max_coverage_size;
unsigned max_class_def_size;
const hb_hashmap_t<unsigned, unsigned>& device_tables;
const hb_vector_t<unsigned>& format1_device_table_indices;
const hb_vector_t<unsigned>& format2_device_table_indices;
unsigned original_count ()
{
return thiz->class1Count;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (*this, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (*this, count);
}
};
size_t get_class1_record_size () const
{
const size_t class2_count = class2Count;
return
class2_count * (valueFormat1.get_size () + valueFormat2.get_size ());
}
unsigned clone_range (split_context_t& split_context,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning PairPosFormat2 (%u) range [%u, %u).", split_context.this_index, start, end);
graph_t& graph = split_context.c.graph;
unsigned num_records = end - start;
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size
+ num_records * split_context.class1_record_size;
unsigned pair_pos_prime_id = split_context.c.create_node (prime_size);
if (pair_pos_prime_id == (unsigned) -1) return -1;
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) graph.object (pair_pos_prime_id).head;
pair_pos_prime->format = this->format;
pair_pos_prime->valueFormat1 = this->valueFormat1;
pair_pos_prime->valueFormat2 = this->valueFormat2;
pair_pos_prime->class1Count = num_records;
pair_pos_prime->class2Count = this->class2Count;
clone_class1_records (split_context,
pair_pos_prime_id,
start,
end);
unsigned coverage_id =
graph.index_for_offset (split_context.this_index, &coverage);
unsigned class_def_1_id =
graph.index_for_offset (split_context.this_index, &classDef1);
auto& coverage_v = graph.vertices_[coverage_id];
auto& class_def_1_v = graph.vertices_[class_def_1_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
if (!coverage_table
|| !coverage_table->sanitize (coverage_v)
|| !class_def_1_table
|| !class_def_1_table->sanitize (class_def_1_v))
return -1;
auto klass_map =
+ coverage_table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1_table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass >= start && klass < end;
}, hb_second)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, hb_codepoint_t> gid_and_class) {
// Classes must be from 0...N so subtract start
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid_and_class.first, gid_and_class.second - start);
})
;
if (!Coverage::add_coverage (split_context.c,
pair_pos_prime_id,
2,
+ klass_map | hb_map_retains_sorting (hb_first),
split_context.max_coverage_size))
return -1;
// classDef1
if (!ClassDef::add_class_def (split_context.c,
pair_pos_prime_id,
8,
+ klass_map,
split_context.max_class_def_size))
return -1;
// classDef2
unsigned class_def_2_id =
graph.index_for_offset (split_context.this_index, &classDef2);
auto* class_def_link = graph.vertices_[pair_pos_prime_id].obj.real_links.push ();
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_2_id;
class_def_link->position = 10;
graph.vertices_[class_def_2_id].parents.push (pair_pos_prime_id);
graph.duplicate (pair_pos_prime_id, class_def_2_id);
return pair_pos_prime_id;
}
void clone_class1_records (split_context_t& split_context,
unsigned pair_pos_prime_id,
unsigned start, unsigned end) const
{
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
char* start_addr = ((char*)&values[0]) + start * split_context.class1_record_size;
unsigned num_records = end - start;
hb_memcpy (&pair_pos_prime->values[0],
start_addr,
num_records * split_context.class1_record_size);
if (!split_context.format1_device_table_indices
&& !split_context.format2_device_table_indices)
// No device tables to move over.
return;
unsigned class2_count = class2Count;
for (unsigned i = start; i < end; i++)
{
for (unsigned j = 0; j < class2_count; j++)
{
unsigned value1_index = split_context.value_record_len * (class2_count * i + j);
unsigned value2_index = value1_index + split_context.value1_record_len;
unsigned new_value1_index = split_context.value_record_len * (class2_count * (i - start) + j);
unsigned new_value2_index = new_value1_index + split_context.value1_record_len;
transfer_device_tables (split_context,
pair_pos_prime_id,
split_context.format1_device_table_indices,
value1_index,
new_value1_index);
transfer_device_tables (split_context,
pair_pos_prime_id,
split_context.format2_device_table_indices,
value2_index,
new_value2_index);
}
}
}
void transfer_device_tables (split_context_t& split_context,
unsigned pair_pos_prime_id,
const hb_vector_t<unsigned>& device_table_indices,
unsigned old_value_record_index,
unsigned new_value_record_index) const
{
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
for (unsigned i : device_table_indices)
{
OT::Offset16* record = (OT::Offset16*) &values[old_value_record_index + i];
unsigned record_position = ((char*) record) - ((char*) this);
if (!split_context.device_tables.has (record_position)) continue;
split_context.c.graph.move_child (
split_context.this_index,
record,
pair_pos_prime_id,
(OT::Offset16*) &pair_pos_prime->values[new_value_record_index + i]);
}
}
bool shrink (split_context_t& split_context,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking PairPosFormat2 (%u) to [0, %u).",
split_context.this_index,
count);
unsigned old_count = class1Count;
if (count >= old_count)
return true;
graph_t& graph = split_context.c.graph;
class1Count = count;
graph.vertices_[split_context.this_index].obj.tail -=
(old_count - count) * split_context.class1_record_size;
auto coverage =
graph.as_mutable_table<Coverage> (split_context.this_index, &this->coverage);
if (!coverage) return false;
auto class_def_1 =
graph.as_mutable_table<ClassDef> (split_context.this_index, &classDef1);
if (!class_def_1) return false;
auto klass_map =
+ coverage.table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_pair_t<hb_codepoint_t, hb_codepoint_t> (gid, class_def_1.table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass < count;
}, hb_second)
;
auto new_coverage = + klass_map | hb_map_retains_sorting (hb_first);
if (!Coverage::make_coverage (split_context.c,
+ new_coverage,
coverage.index,
// existing ranges may not be kept, worst case size is a format 1
// coverage table.
4 + new_coverage.len() * 2))
return false;
return ClassDef::make_class_def (split_context.c,
+ klass_map,
class_def_1.index,
class_def_1.vertex->table_size ());
}
hb_hashmap_t<unsigned, unsigned>
get_all_device_tables (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
const auto& v = c.graph.vertices_[this_index];
return v.position_to_index_map ();
}
const Coverage* get_coverage (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
auto& coverage_v = c.graph.vertices_[coverage_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return &Null(Coverage);
return coverage_table;
}
const ClassDef* get_class_def_1 (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
unsigned class_def_1_id = c.graph.index_for_offset (this_index, &classDef1);
auto& class_def_1_v = c.graph.vertices_[class_def_1_id];
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
if (!class_def_1_table || !class_def_1_table->sanitize (class_def_1_v))
return &Null(ClassDef);
return class_def_1_table;
}
unsigned size_of_value_record_children (gsubgpos_graph_context_t& c,
const hb_hashmap_t<unsigned, unsigned>& device_tables,
const hb_vector_t<unsigned> device_table_indices,
unsigned value_record_index,
hb_set_t& visited)
{
unsigned size = 0;
for (unsigned i : device_table_indices)
{
OT::Layout::GPOS_impl::Value* record = &values[value_record_index + i];
unsigned record_position = ((char*) record) - ((char*) this);
unsigned* obj_idx;
if (!device_tables.has (record_position, &obj_idx)) continue;
size += c.graph.find_subgraph_size (*obj_idx, visited);
}
return size;
}
unsigned size_of (gsubgpos_graph_context_t& c,
unsigned this_index,
const void* offset) const
{
const unsigned id = c.graph.index_for_offset (this_index, offset);
return c.graph.vertices_[id].table_size ();
}
};
struct PairPos : public OT::Layout::GPOS_impl::PairPos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
case 1:
return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
case 2:
return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 3: HB_FALLTHROUGH;
case 4: HB_FALLTHROUGH;
// Don't split 24bit PairPos's.
#endif
default:
return hb_vector_t<unsigned> ();
}
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
switch (u.format) {
case 1:
return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
case 2:
return ((PairPosFormat2*)(&u.format2))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
case 3: HB_FALLTHROUGH;
case 4: HB_FALLTHROUGH;
#endif
default:
// We don't handle format 3 and 4 here.
return false;
}
}
};
}
#endif // GRAPH_PAIRPOS_GRAPH_HH
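In PairPosFormat2 the dominant term in the size accounting is the Class1Record array: each record packs class2Count value-record pairs, so a row costs class2Count * (size(valueFormat1) + size(valueFormat2)) bytes. A back-of-the-envelope check (all sizes assumed, not taken from a real font) shows how quickly a dense kerning table crosses the 64KB line:
```
#include <cstdio>

int main ()
{
  unsigned value1_size = 4, value2_size = 4;  // assumed: two 4-byte ValueRecords
  unsigned class2_count = 40;
  unsigned class1_record_size = class2_count * (value1_size + value2_size); // 320
  unsigned header_min_size = 16;  // fixed format 2 header in the 16-bit layout
  unsigned class1_count = 220;
  unsigned total = header_min_size + class1_count * class1_record_size;     // 70416
  printf ("row = %u bytes, body = %u bytes (16-bit offset limit: 65536)\n",
          class1_record_size, total);
  return 0;
}
```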

View File

@ -33,6 +33,23 @@ struct overflow_record_t
{
unsigned parent;
unsigned child;
bool operator != (const overflow_record_t& o) const
{ return !(*this == o); }
inline bool operator == (const overflow_record_t& o) const
{
return parent == o.parent &&
child == o.child;
}
inline uint32_t hash () const
{
uint32_t current = 0;
current = current * 31 + hb_hash (parent);
current = current * 31 + hb_hash (child);
return current;
}
};
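The hash() above folds both members with the conventional multiply-by-31 combine so that overflow records can key a hash map for de-duplication, which is exactly how will_overflow uses record_set below. The same pattern in plain standard C++, as a sketch only:
```
#include <cstdint>
#include <functional>
#include <unordered_set>

struct edge_t
{
  unsigned parent, child;
  bool operator== (const edge_t &o) const
  { return parent == o.parent && child == o.child; }
};

struct edge_hash_t
{
  size_t operator() (const edge_t &e) const
  {
    uint32_t h = 0;
    h = h * 31 + std::hash<unsigned> {} (e.parent);  // fold in parent
    h = h * 31 + std::hash<unsigned> {} (e.child);   // fold in child
    return h;
  }
};

// Usage: std::unordered_set<edge_t, edge_hash_t> seen;
//        if (seen.insert ({parent, child}).second) { /* first occurrence */ }
```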
inline
@ -94,6 +111,7 @@ will_overflow (graph_t& graph,
if (overflows) overflows->resize (0);
graph.update_positions ();
hb_hashmap_t<overflow_record_t*, bool> record_set;
const auto& vertices = graph.vertices_;
for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--)
{
@ -109,7 +127,10 @@ will_overflow (graph_t& graph,
overflow_record_t r;
r.parent = parent_idx;
r.child = link.objidx;
if (record_set.has(&r)) continue; // don't keep duplicate overflows.
overflows->push (r);
record_set.set(&r, true);
}
}
@ -132,8 +153,8 @@ void print_overflows (graph_t& graph,
const auto& child = graph.vertices_[o.child];
DEBUG_MSG (SUBSET_REPACK, nullptr,
" overflow from "
"%4d (%4d in, %4d out, space %2d) => "
"%4d (%4d in, %4d out, space %2d)",
"%4u (%4u in, %4u out, space %2u) => "
"%4u (%4u in, %4u out, space %2u)",
o.parent,
parent.incoming_edges (),
parent.obj.real_links.length + parent.obj.virtual_links.length,
@ -144,7 +165,7 @@ void print_overflows (graph_t& graph,
graph.space_for (o.child));
}
if (overflows.length > 10) {
DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %d more overflows.", overflows.length - 10);
DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %u more overflows.", overflows.length - 10);
}
}
@ -223,7 +244,7 @@ inline hb_blob_t* serialize (const graph_t& graph)
return nullptr;
}
memcpy (start, vertices[i].obj.head, size);
hb_memcpy (start, vertices[i].obj.head, size);
// Only real links needs to be serialized.
for (const auto& link : vertices[i].obj.real_links)

View File

@ -0,0 +1,69 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_SPLIT_HELPERS_HH
#define GRAPH_SPLIT_HELPERS_HH
namespace graph {
template<typename Context>
HB_INTERNAL
hb_vector_t<unsigned> actuate_subtable_split (Context& split_context,
const hb_vector_t<unsigned>& split_points)
{
hb_vector_t<unsigned> new_objects;
if (!split_points)
return new_objects;
for (unsigned i = 0; i < split_points.length; i++)
{
unsigned start = split_points[i];
unsigned end = (i < split_points.length - 1)
? split_points[i + 1]
: split_context.original_count ();
unsigned id = split_context.clone_range (start, end);
if (id == (unsigned) -1)
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
return new_objects;
}
new_objects.push (id);
}
if (!split_context.shrink (split_points[0]))
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
}
return new_objects;
}
}
#endif // GRAPH_SPLIT_HELPERS_HH
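actuate_subtable_split asks only three things of its Context: original_count(), clone_range(start, end) returning the new object id or (unsigned)-1 on failure, and shrink(count). For split points s0 < s1 < ... it clones [s0, s1), [s1, s2), ..., [s_last, original_count()), then shrinks the original to [0, s0). A toy context illustrating the contract (all names hypothetical):
```
// Minimal stand-in for the real gsubgpos graph contexts; ids are fake.
struct toy_split_context_t
{
  unsigned count;          // number of records in the "table"
  unsigned next_id = 100;  // pretend graph object ids

  unsigned original_count () { return count; }
  unsigned clone_range (unsigned start, unsigned end)
  { return start < end ? next_id++ : (unsigned) -1; }
  bool shrink (unsigned new_count) { count = new_count; return true; }
};
```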

View File

@ -42,7 +42,7 @@ struct BaselineTableFormat0Part
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:
@ -78,7 +78,7 @@ struct BaselineTableFormat2Part
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:

View File

@ -28,6 +28,7 @@
#define HB_AAT_LAYOUT_COMMON_HH
#include "hb-aat-layout.hh"
#include "hb-aat-map.hh"
#include "hb-open-type.hh"
namespace OT {
@ -39,6 +40,43 @@ namespace AAT {
using namespace OT;
struct ankr;
struct hb_aat_apply_context_t :
hb_dispatch_context_t<hb_aat_apply_context_t, bool, HB_DEBUG_APPLY>
{
const char *get_name () { return "APPLY"; }
template <typename T>
return_t dispatch (const T &obj) { return obj.apply (this); }
static return_t default_return_value () { return false; }
bool stop_sublookup_iteration (return_t r) const { return r; }
const hb_ot_shape_plan_t *plan;
hb_font_t *font;
hb_face_t *face;
hb_buffer_t *buffer;
hb_sanitize_context_t sanitizer;
const ankr *ankr_table;
const OT::GDEF *gdef_table;
const hb_sorted_vector_t<hb_aat_map_t::range_flags_t> *range_flags = nullptr;
hb_mask_t subtable_flags = 0;
/* Unused. For debug tracing only. */
unsigned int lookup_index;
HB_INTERNAL hb_aat_apply_context_t (const hb_ot_shape_plan_t *plan_,
hb_font_t *font_,
hb_buffer_t *buffer_,
hb_blob_t *blob = const_cast<hb_blob_t *> (&Null (hb_blob_t)));
HB_INTERNAL ~hb_aat_apply_context_t ();
HB_INTERNAL void set_ankr_table (const AAT::ankr *ankr_table_);
void set_lookup_index (unsigned int i) { lookup_index = i; }
};
/*
* Lookup Table
*/
@ -415,18 +453,7 @@ struct Lookup
public:
DEFINE_SIZE_UNION (2, format);
};
/* Lookup 0 has unbounded size (dependent on num_glyphs). So we need to define
* special NULL objects for Lookup<> objects, but since it's a template our macros
* don't work. So we have to hand-code them here. UGLY. */
} /* Close namespace. */
/* Ugly hand-coded null objects for template Lookup<> :(. */
extern HB_INTERNAL const unsigned char _hb_Null_AAT_Lookup[2];
template <typename T>
struct Null<AAT::Lookup<T>> {
static AAT::Lookup<T> const & get_null ()
{ return *reinterpret_cast<const AAT::Lookup<T> *> (_hb_Null_AAT_Lookup); }
};
namespace AAT {
DECLARE_NULL_NAMESPACE_BYTES_TEMPLATE1 (AAT, Lookup, 2);
enum { DELETED_GLYPH = 0xFFFF };
@ -681,6 +708,13 @@ struct ObsoleteTypes
const void *base,
const T *array)
{
/* https://github.com/harfbuzz/harfbuzz/issues/3483 */
/* If offset is less than base, return an offset that would
* result in an address half a 32bit address-space away,
* to make sure sanitize fails even on 32bit builds. */
if (unlikely (offset < unsigned ((const char *) array - (const char *) base)))
return INT_MAX / T::static_size;
/* https://github.com/harfbuzz/harfbuzz/issues/2816 */
return (offset - unsigned ((const char *) array - (const char *) base)) / T::static_size;
}
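The guard above sidesteps unsigned-subtraction wraparound: an offset pointing before the array start would otherwise wrap into a huge, valid-looking index. Returning INT_MAX / static_size instead guarantees the later bounds check fails, even on 32-bit builds. A standalone restatement of the arithmetic (record size assumed):
```
#include <climits>

// record_size stands in for T::static_size (e.g. 2 for a 16-bit record).
static unsigned
offset_to_index (unsigned offset, unsigned array_start, unsigned record_size)
{
  if (offset < array_start)
    return INT_MAX / record_size;  // poisoned index: guaranteed out of bounds
  return (offset - array_start) / record_size;
}
```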
@ -744,16 +778,44 @@ struct StateTableDriver
num_glyphs (face_->get_num_glyphs ()) {}
template <typename context_t>
void drive (context_t *c)
void drive (context_t *c, hb_aat_apply_context_t *ac)
{
if (!c->in_place)
buffer->clear_output ();
int state = StateTableT::STATE_START_OF_TEXT;
// If there's only one range, we already checked the flag.
auto *last_range = ac->range_flags && (ac->range_flags->length > 1) ? &(*ac->range_flags)[0] : nullptr;
for (buffer->idx = 0; buffer->successful;)
{
/* This block is copied in NoncontextualSubtable::apply. Keep in sync. */
if (last_range)
{
auto *range = last_range;
if (buffer->idx < buffer->len)
{
unsigned cluster = buffer->cur().cluster;
while (cluster < range->cluster_first)
range--;
while (cluster > range->cluster_last)
range++;
last_range = range;
}
if (!(range->flags & ac->subtable_flags))
{
if (buffer->idx == buffer->len || unlikely (!buffer->successful))
break;
state = StateTableT::STATE_START_OF_TEXT;
(void) buffer->next_glyph ();
continue;
}
}
unsigned int klass = buffer->idx < buffer->len ?
machine.get_class (buffer->info[buffer->idx].codepoint, num_glyphs) :
machine.get_class (buffer->cur().codepoint, num_glyphs) :
(unsigned) StateTableT::CLASS_END_OF_TEXT;
DEBUG_MSG (APPLY, nullptr, "c%u at %u", klass, buffer->idx);
const EntryT &entry = machine.get_entry (state, klass);
@ -849,41 +911,6 @@ struct StateTableDriver
};
struct ankr;
struct hb_aat_apply_context_t :
hb_dispatch_context_t<hb_aat_apply_context_t, bool, HB_DEBUG_APPLY>
{
const char *get_name () { return "APPLY"; }
template <typename T>
return_t dispatch (const T &obj) { return obj.apply (this); }
static return_t default_return_value () { return false; }
bool stop_sublookup_iteration (return_t r) const { return r; }
const hb_ot_shape_plan_t *plan;
hb_font_t *font;
hb_face_t *face;
hb_buffer_t *buffer;
hb_sanitize_context_t sanitizer;
const ankr *ankr_table;
const OT::GDEF *gdef_table;
/* Unused. For debug tracing only. */
unsigned int lookup_index;
HB_INTERNAL hb_aat_apply_context_t (const hb_ot_shape_plan_t *plan_,
hb_font_t *font_,
hb_buffer_t *buffer_,
hb_blob_t *blob = const_cast<hb_blob_t *> (&Null (hb_blob_t)));
HB_INTERNAL ~hb_aat_apply_context_t ();
HB_INTERNAL void set_ankr_table (const AAT::ankr *ankr_table_);
void set_lookup_index (unsigned int i) { lookup_index = i; }
};
} /* namespace AAT */
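The range_flags lookup that drive() now performs (and that NoncontextualSubtable::apply mirrors, per the keep-in-sync comments) keeps a cursor into the sorted range list and walks it backward or forward from the previous hit, since consecutive glyphs have nearby cluster values. A simplified standalone version, assuming a non-empty list whose ranges jointly cover every cluster:
```
struct range_t { unsigned flags, cluster_first, cluster_last; };

// Resume the search from `last` (the previous hit), stepping at most a few
// ranges in either direction for typical buffers.
static const range_t *
find_range (const range_t *ranges, unsigned n, const range_t *last, unsigned cluster)
{
  const range_t *range = last ? last : ranges;
  while (range > ranges && cluster < range->cluster_first) range--;
  while (range < ranges + n - 1 && cluster > range->cluster_last) range++;
  return range;
}
```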

View File

@ -62,7 +62,7 @@ struct SettingName
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:

View File

@ -48,7 +48,7 @@ struct ActionSubrecordHeader
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
HBUINT16 actionClass; /* The JustClass value associated with this
@ -65,14 +65,14 @@ struct DecompositionAction
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
ActionSubrecordHeader
header;
HBFixed lowerLimit; /* If the distance factor is less than this value,
F16DOT16 lowerLimit; /* If the distance factor is less than this value,
* then the ligature is decomposed. */
HBFixed upperLimit; /* If the distance factor is greater than this value,
F16DOT16 upperLimit; /* If the distance factor is greater than this value,
* then the ligature is decomposed. */
HBUINT16 order; /* Numerical order in which this ligature will
* be decomposed; you may want infrequent ligatures
@ -112,13 +112,13 @@ struct ConditionalAddGlyphAction
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:
ActionSubrecordHeader
header;
HBFixed substThreshold; /* Distance growth factor (in ems) at which
F16DOT16 substThreshold; /* Distance growth factor (in ems) at which
* this glyph is replaced and the growth factor
* recalculated. */
HBGlyphID16 addGlyph; /* Glyph to be added as kashida. If this value is
@ -137,7 +137,7 @@ struct DuctileGlyphAction
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:
@ -146,13 +146,13 @@ struct DuctileGlyphAction
HBUINT32 variationAxis; /* The 4-byte tag identifying the ductile axis.
* This would normally be 0x64756374 ('duct'),
* but you may use any axis the font contains. */
HBFixed minimumLimit; /* The lowest value for the ductility axis that
F16DOT16 minimumLimit; /* The lowest value for the ductility axis that
* still yields an acceptable appearance. Normally
* this will be 1.0. */
HBFixed noStretchValue; /* This is the default value that corresponds to
F16DOT16 noStretchValue; /* This is the default value that corresponds to
* no change in appearance. Normally, this will
* be 1.0. */
HBFixed maximumLimit; /* The highest value for the ductility axis that
F16DOT16 maximumLimit; /* The highest value for the ductility axis that
* still yields an acceptable appearance. */
public:
DEFINE_SIZE_STATIC (22);
@ -163,7 +163,7 @@ struct RepeatedAddGlyphAction
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:
@ -271,14 +271,14 @@ struct JustWidthDeltaEntry
};
protected:
HBFixed beforeGrowLimit;/* The ratio by which the advance width of the
F16DOT16 beforeGrowLimit;/* The ratio by which the advance width of the
* glyph is permitted to grow on the left or top side. */
HBFixed beforeShrinkLimit;
F16DOT16 beforeShrinkLimit;
/* The ratio by which the advance width of the
* glyph is permitted to shrink on the left or top side. */
HBFixed afterGrowLimit; /* The ratio by which the advance width of the glyph
F16DOT16 afterGrowLimit; /* The ratio by which the advance width of the glyph
* is permitted to shrink on the left or top side. */
HBFixed afterShrinkLimit;
F16DOT16 afterShrinkLimit;
/* The ratio by which the advance width of the glyph
* is at most permitted to shrink on the right or
* bottom side. */
@ -294,7 +294,7 @@ struct WidthDeltaPair
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
protected:

View File

@ -350,7 +350,7 @@ struct KerxSubTableFormat1
driver_context_t dc (this, c);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->font->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (true);
}
@ -594,7 +594,7 @@ struct KerxSubTableFormat4
driver_context_t dc (this, c);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->font->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (true);
}
@ -751,7 +751,7 @@ struct KerxSubTableHeader
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
public:
@ -869,6 +869,8 @@ struct KerxTable
bool apply (AAT::hb_aat_apply_context_t *c) const
{
c->buffer->unsafe_to_concat ();
typedef typename T::SubTable SubTable;
bool ret = false;
@ -889,7 +891,7 @@ struct KerxTable
reverse = bool (st->u.header.coverage & st->u.header.Backwards) !=
HB_DIRECTION_IS_BACKWARD (c->buffer->props.direction);
if (!c->buffer->message (c->font, "start subtable %d", c->lookup_index))
if (!c->buffer->message (c->font, "start subtable %u", c->lookup_index))
goto skip;
if (!seenCrossStream &&
@ -921,7 +923,7 @@ struct KerxTable
if (reverse)
c->buffer->reverse ();
(void) c->buffer->message (c->font, "end subtable %d", c->lookup_index);
(void) c->buffer->message (c->font, "end subtable %u", c->lookup_index);
skip:
st = &StructAfter<SubTable> (*st);

View File

@ -131,14 +131,14 @@ struct RearrangementSubtable
hb_glyph_info_t *info = buffer->info;
hb_glyph_info_t buf[4];
memcpy (buf, info + start, l * sizeof (buf[0]));
memcpy (buf + 2, info + end - r, r * sizeof (buf[0]));
hb_memcpy (buf, info + start, l * sizeof (buf[0]));
hb_memcpy (buf + 2, info + end - r, r * sizeof (buf[0]));
if (l != r)
memmove (info + start + r, info + start + l, (end - start - l - r) * sizeof (buf[0]));
memcpy (info + start, buf + 2, r * sizeof (buf[0]));
memcpy (info + end - l, buf, l * sizeof (buf[0]));
hb_memcpy (info + start, buf + 2, r * sizeof (buf[0]));
hb_memcpy (info + end - l, buf, l * sizeof (buf[0]));
if (reverse_l)
{
buf[0] = info[end - 1];
@ -169,7 +169,7 @@ struct RearrangementSubtable
driver_context_t dc (this);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (dc.ret);
}
@ -325,7 +325,7 @@ struct ContextualSubtable
driver_context_t dc (this, c);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (dc.ret);
}
@ -525,7 +525,7 @@ struct LigatureSubtable
if (unlikely (!componentData.sanitize (&c->sanitizer))) break;
ligature_idx += componentData;
DEBUG_MSG (APPLY, nullptr, "Action store %u last %u",
DEBUG_MSG (APPLY, nullptr, "Action store %d last %d",
bool (action & LigActionStore),
bool (action & LigActionLast));
if (action & (LigActionStore | LigActionLast))
@ -577,7 +577,7 @@ struct LigatureSubtable
driver_context_t dc (this, c);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (dc.ret);
}
@ -618,8 +618,27 @@ struct NoncontextualSubtable
hb_glyph_info_t *info = c->buffer->info;
unsigned int count = c->buffer->len;
// If there's only one range, we already checked the flag.
auto *last_range = c->range_flags && (c->range_flags->length > 1) ? &(*c->range_flags)[0] : nullptr;
for (unsigned int i = 0; i < count; i++)
{
/* This block copied from StateTableDriver::drive. Keep in sync. */
if (last_range)
{
auto *range = last_range;
{
unsigned cluster = info[i].cluster;
while (cluster < range->cluster_first)
range--;
while (cluster > range->cluster_last)
range++;
last_range = range;
}
if (!(range->flags & c->subtable_flags))
continue;
}
const HBGlyphID16 *replacement = substitute.get_value (info[i].codepoint, num_glyphs);
if (replacement)
{
@ -820,7 +839,7 @@ struct InsertionSubtable
driver_context_t dc (this, c);
StateTableDriver<Types, EntryData> driver (machine, c->buffer, c->face);
driver.drive (&dc);
driver.drive (&dc, c);
return_trace (dc.ret);
}
@ -968,7 +987,7 @@ struct Chain
// Check whether this type/setting pair was requested in the map, and if so, apply its flags.
// (The search here only looks at the type and setting fields of feature_info_t.)
hb_aat_map_builder_t::feature_info_t info = { type, setting, false, 0 };
if (map->features.bsearch (info))
if (map->current_features.bsearch (info))
{
flags &= feature.disableFlags;
flags |= feature.enableFlags;
@ -980,13 +999,21 @@ struct Chain
setting = HB_AAT_LAYOUT_FEATURE_SELECTOR_LOWER_CASE_SMALL_CAPS;
goto retry;
}
#ifndef HB_NO_AAT
else if (type == HB_AAT_LAYOUT_FEATURE_TYPE_LANGUAGE_TAG_TYPE && setting &&
/* TODO: Rudimentary language matching. */
hb_language_matches (map->face->table.ltag->get_language (setting - 1), map->props.language))
{
flags &= feature.disableFlags;
flags |= feature.enableFlags;
}
#endif
}
}
return flags;
}
void apply (hb_aat_apply_context_t *c,
hb_mask_t flags) const
void apply (hb_aat_apply_context_t *c) const
{
const ChainSubtable<Types> *subtable = &StructAfter<ChainSubtable<Types>> (featureZ.as_array (featureCount));
unsigned int count = subtableCount;
@ -994,8 +1021,10 @@ struct Chain
{
bool reverse;
if (!(subtable->subFeatureFlags & flags))
if (hb_none (hb_iter (c->range_flags) |
hb_map ([&subtable] (const hb_aat_map_t::range_flags_t _) -> bool { return subtable->subFeatureFlags & (_.flags); })))
goto skip;
c->subtable_flags = subtable->subFeatureFlags;
if (!(subtable->get_coverage() & ChainSubtable<Types>::AllDirections) &&
HB_DIRECTION_IS_VERTICAL (c->buffer->props.direction) !=
@ -1034,7 +1063,7 @@ struct Chain
bool (subtable->get_coverage () & ChainSubtable<Types>::Backwards) !=
HB_DIRECTION_IS_BACKWARD (c->buffer->props.direction);
if (!c->buffer->message (c->font, "start chainsubtable %d", c->lookup_index))
if (!c->buffer->message (c->font, "start chainsubtable %u", c->lookup_index))
goto skip;
if (reverse)
@ -1045,7 +1074,7 @@ struct Chain
if (reverse)
c->buffer->reverse ();
(void) c->buffer->message (c->font, "end chainsubtable %d", c->lookup_index);
(void) c->buffer->message (c->font, "end chainsubtable %u", c->lookup_index);
if (unlikely (!c->buffer->successful)) return;
@ -1111,22 +1140,31 @@ struct mortmorx
{
const Chain<Types> *chain = &firstChain;
unsigned int count = chainCount;
if (unlikely (!map->chain_flags.resize (count)))
return;
for (unsigned int i = 0; i < count; i++)
{
map->chain_flags.push (chain->compile_flags (mapper));
map->chain_flags[i].push (hb_aat_map_t::range_flags_t {chain->compile_flags (mapper),
mapper->range_first,
mapper->range_last});
chain = &StructAfter<Chain<Types>> (*chain);
}
}
void apply (hb_aat_apply_context_t *c) const
void apply (hb_aat_apply_context_t *c,
const hb_aat_map_t &map) const
{
if (unlikely (!c->buffer->successful)) return;
c->buffer->unsafe_to_concat ();
c->set_lookup_index (0);
const Chain<Types> *chain = &firstChain;
unsigned int count = chainCount;
for (unsigned int i = 0; i < count; i++)
{
chain->apply (c, c->plan->aat_map.chain_flags[i]);
c->range_flags = &map.chain_flags[i];
chain->apply (c);
if (unlikely (!c->buffer->successful)) return;
chain = &StructAfter<Chain<Types>> (*chain);
}

View File

@ -42,7 +42,7 @@ struct OpticalBounds
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this)));
return_trace (c->check_struct (this));
}
FWORD leftSide;

View File

@ -62,7 +62,7 @@ struct TrackTableEntry
}
protected:
HBFixed track; /* Track value for this record. */
F16DOT16 track; /* Track value for this record. */
NameID trackNameID; /* The 'name' table index for this track.
* (a short word or phrase like "loose"
* or "very tight") */
@ -82,7 +82,7 @@ struct TrackData
const void *base) const
{
unsigned int sizes = nSizes;
hb_array_t<const HBFixed> size_table ((base+sizeTable).arrayZ, sizes);
hb_array_t<const F16DOT16> size_table ((base+sizeTable).arrayZ, sizes);
float s0 = size_table[idx].to_float ();
float s1 = size_table[idx + 1].to_float ();
@ -120,7 +120,7 @@ struct TrackData
if (!sizes) return 0.;
if (sizes == 1) return trackTableEntry->get_value (base, 0, sizes);
hb_array_t<const HBFixed> size_table ((base+sizeTable).arrayZ, sizes);
hb_array_t<const F16DOT16> size_table ((base+sizeTable).arrayZ, sizes);
unsigned int size_index;
for (size_index = 0; size_index < sizes - 1; size_index++)
if (size_table[size_index].to_float () >= ptem)
@ -141,7 +141,7 @@ struct TrackData
protected:
HBUINT16 nTracks; /* Number of separate tracks included in this table. */
HBUINT16 nSizes; /* Number of point sizes included in this table. */
NNOffset32To<UnsizedArrayOf<HBFixed>>
NNOffset32To<UnsizedArrayOf<F16DOT16>>
sizeTable; /* Offset from start of the tracking table to
* Array[nSizes] of size values. */
UnsizedArrayOf<TrackTableEntry>

View File

@ -131,6 +131,7 @@ static const hb_aat_feature_mapping_t feature_mappings[] =
{HB_TAG ('p','n','u','m'), HB_AAT_LAYOUT_FEATURE_TYPE_NUMBER_SPACING, HB_AAT_LAYOUT_FEATURE_SELECTOR_PROPORTIONAL_NUMBERS, (hb_aat_layout_feature_selector_t) 4},
{HB_TAG ('p','w','i','d'), HB_AAT_LAYOUT_FEATURE_TYPE_TEXT_SPACING, HB_AAT_LAYOUT_FEATURE_SELECTOR_PROPORTIONAL_TEXT, (hb_aat_layout_feature_selector_t) 7},
{HB_TAG ('q','w','i','d'), HB_AAT_LAYOUT_FEATURE_TYPE_TEXT_SPACING, HB_AAT_LAYOUT_FEATURE_SELECTOR_QUARTER_WIDTH_TEXT, (hb_aat_layout_feature_selector_t) 7},
{HB_TAG ('r','l','i','g'), HB_AAT_LAYOUT_FEATURE_TYPE_LIGATURES, HB_AAT_LAYOUT_FEATURE_SELECTOR_REQUIRED_LIGATURES_ON, HB_AAT_LAYOUT_FEATURE_SELECTOR_REQUIRED_LIGATURES_OFF},
{HB_TAG ('r','u','b','y'), HB_AAT_LAYOUT_FEATURE_TYPE_RUBY_KANA, HB_AAT_LAYOUT_FEATURE_SELECTOR_RUBY_KANA_ON, HB_AAT_LAYOUT_FEATURE_SELECTOR_RUBY_KANA_OFF},
{HB_TAG ('s','i','n','f'), HB_AAT_LAYOUT_FEATURE_TYPE_VERTICAL_POSITION, HB_AAT_LAYOUT_FEATURE_SELECTOR_SCIENTIFIC_INFERIORS, HB_AAT_LAYOUT_FEATURE_SELECTOR_NORMAL_POSITION},
{HB_TAG ('s','m','c','p'), HB_AAT_LAYOUT_FEATURE_TYPE_LOWER_CASE, HB_AAT_LAYOUT_FEATURE_SELECTOR_LOWER_CASE_SMALL_CAPS, HB_AAT_LAYOUT_FEATURE_SELECTOR_DEFAULT_LOWER_CASE},
@ -229,7 +230,7 @@ hb_aat_layout_compile_map (const hb_aat_map_builder_t *mapper,
*
* <note>Note: does not examine the `GSUB` table.</note>
*
* Return value: %true if data found, %false otherwise
* Return value: `true` if data found, `false` otherwise
*
* Since: 2.3.0
*/
@ -243,15 +244,23 @@ hb_aat_layout_has_substitution (hb_face_t *face)
void
hb_aat_layout_substitute (const hb_ot_shape_plan_t *plan,
hb_font_t *font,
hb_buffer_t *buffer)
hb_buffer_t *buffer,
const hb_feature_t *features,
unsigned num_features)
{
hb_aat_map_builder_t builder (font->face, plan->props);
for (unsigned i = 0; i < num_features; i++)
builder.add_feature (features[i]);
hb_aat_map_t map;
builder.compile (map);
hb_blob_t *morx_blob = font->face->table.morx.get_blob ();
const AAT::morx& morx = *morx_blob->as<AAT::morx> ();
if (morx.has_data ())
{
AAT::hb_aat_apply_context_t c (plan, font, buffer, morx_blob);
if (!buffer->message (font, "start table morx")) return;
morx.apply (&c);
morx.apply (&c, map);
(void) buffer->message (font, "end table morx");
return;
}
@ -262,7 +271,7 @@ hb_aat_layout_substitute (const hb_ot_shape_plan_t *plan,
{
AAT::hb_aat_apply_context_t c (plan, font, buffer, mort_blob);
if (!buffer->message (font, "start table mort")) return;
mort.apply (&c);
mort.apply (&c, map);
(void) buffer->message (font, "end table mort");
return;
}
@ -288,7 +297,7 @@ is_deleted_glyph (const hb_glyph_info_t *info)
void
hb_aat_layout_remove_deleted_glyphs (hb_buffer_t *buffer)
{
hb_ot_layout_delete_glyphs_inplace (buffer, is_deleted_glyph);
buffer->delete_glyphs_inplace (is_deleted_glyph);
}
/**
@ -300,7 +309,7 @@ hb_aat_layout_remove_deleted_glyphs (hb_buffer_t *buffer)
*
* <note>Note: does not examine the `GPOS` table.</note>
*
* Return value: %true if data found, %false otherwise
* Return value: `true` if data found, `false` otherwise
*
* Since: 2.3.0
*/
@ -333,7 +342,7 @@ hb_aat_layout_position (const hb_ot_shape_plan_t *plan,
* Tests whether the specified face includes any tracking information
* in the `trak` table.
*
* Return value: %true if data found, %false otherwise
* Return value: `true` if data found, `false` otherwise
*
* Since: 2.3.0
*/

View File

@ -53,7 +53,9 @@ hb_aat_layout_compile_map (const hb_aat_map_builder_t *mapper,
HB_INTERNAL void
hb_aat_layout_substitute (const hb_ot_shape_plan_t *plan,
hb_font_t *font,
hb_buffer_t *buffer);
hb_buffer_t *buffer,
const hb_feature_t *features,
unsigned num_features);
HB_INTERNAL void
hb_aat_layout_zero_width_deleted_glyphs (hb_buffer_t *buffer);

View File

@ -36,27 +36,29 @@
#include "hb-aat-layout-feat-table.hh"
void hb_aat_map_builder_t::add_feature (hb_tag_t tag, unsigned value)
void hb_aat_map_builder_t::add_feature (const hb_feature_t &feature)
{
if (!face->table.feat->has_data ()) return;
if (tag == HB_TAG ('a','a','l','t'))
if (feature.tag == HB_TAG ('a','a','l','t'))
{
if (!face->table.feat->exposes_feature (HB_AAT_LAYOUT_FEATURE_TYPE_CHARACTER_ALTERNATIVES))
return;
feature_info_t *info = features.push();
info->type = HB_AAT_LAYOUT_FEATURE_TYPE_CHARACTER_ALTERNATIVES;
info->setting = (hb_aat_layout_feature_selector_t) value;
info->seq = features.length;
info->is_exclusive = true;
feature_range_t *range = features.push();
range->start = feature.start;
range->end = feature.end;
range->info.type = HB_AAT_LAYOUT_FEATURE_TYPE_CHARACTER_ALTERNATIVES;
range->info.setting = (hb_aat_layout_feature_selector_t) feature.value;
range->info.seq = features.length;
range->info.is_exclusive = true;
return;
}
const hb_aat_feature_mapping_t *mapping = hb_aat_layout_find_feature_mapping (tag);
const hb_aat_feature_mapping_t *mapping = hb_aat_layout_find_feature_mapping (feature.tag);
if (!mapping) return;
const AAT::FeatureName* feature = &face->table.feat->get_feature (mapping->aatFeatureType);
if (!feature->has_data ())
const AAT::FeatureName* feature_name = &face->table.feat->get_feature (mapping->aatFeatureType);
if (!feature_name->has_data ())
{
/* Special case: Chain::compile_flags will fall back to the deprecated version of
* small-caps if necessary, so we need to check for that possibility.
@ -64,38 +66,106 @@ void hb_aat_map_builder_t::add_feature (hb_tag_t tag, unsigned value)
if (mapping->aatFeatureType == HB_AAT_LAYOUT_FEATURE_TYPE_LOWER_CASE &&
mapping->selectorToEnable == HB_AAT_LAYOUT_FEATURE_SELECTOR_LOWER_CASE_SMALL_CAPS)
{
feature = &face->table.feat->get_feature (HB_AAT_LAYOUT_FEATURE_TYPE_LETTER_CASE);
if (!feature->has_data ()) return;
feature_name = &face->table.feat->get_feature (HB_AAT_LAYOUT_FEATURE_TYPE_LETTER_CASE);
if (!feature_name->has_data ()) return;
}
else return;
}
feature_info_t *info = features.push();
info->type = mapping->aatFeatureType;
info->setting = value ? mapping->selectorToEnable : mapping->selectorToDisable;
info->seq = features.length;
info->is_exclusive = feature->is_exclusive ();
feature_range_t *range = features.push();
range->start = feature.start;
range->end = feature.end;
range->info.type = mapping->aatFeatureType;
range->info.setting = feature.value ? mapping->selectorToEnable : mapping->selectorToDisable;
range->info.seq = features.length;
range->info.is_exclusive = feature_name->is_exclusive ();
}
void
hb_aat_map_builder_t::compile (hb_aat_map_t &m)
{
/* Sort features and merge duplicates */
if (features.length)
/* Compute active features per range, and compile each. */
/* Sort features by start/end events. */
hb_vector_t<feature_event_t> feature_events;
for (unsigned int i = 0; i < features.length; i++)
{
features.qsort ();
unsigned int j = 0;
for (unsigned int i = 1; i < features.length; i++)
if (features[i].type != features[j].type ||
/* Nonexclusive feature selectors come in even/odd pairs to turn a setting on/off
* respectively, so we mask out the low-order bit when checking for "duplicates"
* (selectors referring to the same feature setting) here. */
(!features[i].is_exclusive && ((features[i].setting & ~1) != (features[j].setting & ~1))))
features[++j] = features[i];
features.shrink (j + 1);
auto &feature = features[i];
if (features[i].start == features[i].end)
continue;
feature_event_t *event;
event = feature_events.push ();
event->index = features[i].start;
event->start = true;
event->feature = feature.info;
event = feature_events.push ();
event->index = features[i].end;
event->start = false;
event->feature = feature.info;
}
feature_events.qsort ();
/* Add a strategic final event. */
{
feature_info_t feature;
feature.seq = features.length + 1;
feature_event_t *event = feature_events.push ();
event->index = -1; /* This value does magic. */
event->start = false;
event->feature = feature;
}
hb_aat_layout_compile_map (this, &m);
/* Scan events and save features for each range. */
hb_sorted_vector_t<feature_info_t> active_features;
unsigned int last_index = 0;
for (unsigned int i = 0; i < feature_events.length; i++)
{
feature_event_t *event = &feature_events[i];
if (event->index != last_index)
{
/* Save a snapshot of active features and the range. */
/* Sort features and merge duplicates */
current_features = active_features;
range_first = last_index;
range_last = event->index - 1;
if (current_features.length)
{
current_features.qsort ();
unsigned int j = 0;
for (unsigned int i = 1; i < current_features.length; i++)
if (current_features[i].type != current_features[j].type ||
/* Nonexclusive feature selectors come in even/odd pairs to turn a setting on/off
* respectively, so we mask out the low-order bit when checking for "duplicates"
* (selectors referring to the same feature setting) here. */
(!current_features[i].is_exclusive && ((current_features[i].setting & ~1) != (current_features[j].setting & ~1))))
current_features[++j] = current_features[i];
current_features.shrink (j + 1);
}
hb_aat_layout_compile_map (this, &m);
last_index = event->index;
}
if (event->start)
{
active_features.push (event->feature);
} else {
feature_info_t *feature = active_features.lsearch (event->feature);
if (feature)
active_features.remove_ordered (feature - active_features.arrayZ);
}
}
for (auto &chain_flags : m.chain_flags)
// With our above setup this value is one less than desired; adjust it.
chain_flags.tail().cluster_last = HB_FEATURE_GLOBAL_END;
}
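The rewritten compile() above replaces the single global feature list with a classic event sweep: every feature range contributes a start event and an end event, events are sorted, and the set of active features is snapshotted and compiled whenever the sweep index advances. Below is a minimal standalone sketch of that technique; the types and names (range_t, event_t) are invented for illustration, it uses half-open ranges for simplicity, and it omits the real code's inclusive bounds and sentinel event at index (unsigned)-1.

```cpp
// Hypothetical sketch of the start/end event sweep; not HarfBuzz API.
#include <algorithm>
#include <cstdio>
#include <vector>

struct range_t { unsigned start, end, value; };

int main ()
{
  std::vector<range_t> features = { {0, 5, 1}, {3, 8, 2} };

  struct event_t { unsigned index; bool start; unsigned value; };
  std::vector<event_t> events;
  for (auto &f : features)
  {
    events.push_back ({f.start, true,  f.value});
    events.push_back ({f.end,   false, f.value});
  }
  // End events sort before start events at the same index.
  std::sort (events.begin (), events.end (),
             [] (const event_t &a, const event_t &b)
             { return a.index != b.index ? a.index < b.index : a.start < b.start; });

  std::vector<unsigned> active;
  unsigned last = 0;
  for (auto &e : events)
  {
    if (e.index != last)
    {
      // Snapshot: here the real code sorts, deduplicates, and compiles the map.
      printf ("[%u, %u) active:", last, e.index);
      for (unsigned v : active) printf (" %u", v);
      printf ("\n");
      last = e.index;
    }
    if (e.start) active.push_back (e.value);
    else         active.erase (std::find (active.begin (), active.end (), e.value));
  }
  return 0;
}
```

This prints `[0, 3) active: 1`, `[3, 5) active: 1 2`, `[5, 8) active: 2`, one snapshot per distinct range, which is exactly the shape the builder hands to hb_aat_layout_compile_map per segment.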


@ -35,16 +35,15 @@ struct hb_aat_map_t
friend struct hb_aat_map_builder_t;
public:
void init ()
struct range_flags_t
{
memset (this, 0, sizeof (*this));
chain_flags.init ();
}
void fini () { chain_flags.fini (); }
hb_mask_t flags;
unsigned cluster_first;
unsigned cluster_last; // end - 1
};
public:
hb_vector_t<hb_mask_t> chain_flags;
hb_vector_t<hb_sorted_vector_t<range_flags_t>> chain_flags;
};
struct hb_aat_map_builder_t
@ -52,10 +51,11 @@ struct hb_aat_map_builder_t
public:
HB_INTERNAL hb_aat_map_builder_t (hb_face_t *face_,
const hb_segment_properties_t *props_ HB_UNUSED) :
face (face_) {}
const hb_segment_properties_t props_) :
face (face_),
props (props_) {}
HB_INTERNAL void add_feature (hb_tag_t tag, unsigned int value=1);
HB_INTERNAL void add_feature (const hb_feature_t &feature);
HB_INTERNAL void compile (hb_aat_map_t &m);
@ -77,7 +77,7 @@ struct hb_aat_map_builder_t
return (a->seq < b->seq ? -1 : a->seq > b->seq ? 1 : 0);
}
/* compares type & setting only, not is_exclusive flag or seq number */
/* compares type & setting only */
int cmp (const feature_info_t& f) const
{
return (f.type != type) ? (f.type < type ? -1 : 1) :
@ -85,11 +85,38 @@ struct hb_aat_map_builder_t
}
};
public:
hb_face_t *face;
struct feature_range_t
{
feature_info_t info;
unsigned start;
unsigned end;
};
private:
struct feature_event_t
{
unsigned int index;
bool start;
feature_info_t feature;
HB_INTERNAL static int cmp (const void *pa, const void *pb) {
const feature_event_t *a = (const feature_event_t *) pa;
const feature_event_t *b = (const feature_event_t *) pb;
return a->index < b->index ? -1 : a->index > b->index ? 1 :
a->start < b->start ? -1 : a->start > b->start ? 1 :
feature_info_t::cmp (&a->feature, &b->feature);
}
};
public:
hb_sorted_vector_t<feature_info_t> features;
hb_face_t *face;
hb_segment_properties_t props;
public:
hb_sorted_vector_t<feature_range_t> features;
hb_sorted_vector_t<feature_info_t> current_features;
unsigned range_first = HB_FEATURE_GLOBAL_START;
unsigned range_last = HB_FEATURE_GLOBAL_END;
};


@ -109,15 +109,16 @@ struct BEInt<Type, 2>
struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
constexpr operator Type () const
{
#if ((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap16 (((packed_uint16_t *) this)->v);
return __builtin_bswap16 (((packed_uint16_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
return ((packed_uint16_t *) this)->v;
return ((packed_uint16_t *) v)->v;
#endif
#else
return (v[0] << 8)
@ -153,15 +154,16 @@ struct BEInt<Type, 4>
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
constexpr operator Type () const {
#if ((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
/* Spoon-feed the compiler a big-endian integer with alignment 1.
* https://github.com/harfbuzz/harfbuzz/pull/1398 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap32 (((packed_uint32_t *) this)->v);
return __builtin_bswap32 (((packed_uint32_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
return ((packed_uint32_t *) this)->v;
return ((packed_uint32_t *) v)->v;
#endif
#else
return (v[0] << 24)
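The two BEInt hunks above gate the fast path behind __OPTIMIZE__ and HB_NO_PACKED and read through the stored byte array v rather than this. The trick is the packed struct, which tells the compiler the load may be unaligned, combined with a byte swap. A standalone sketch under the same assumptions follows; read_be32 is a made-up name, and the byte-order macros are the GCC/Clang predefined ones.

```cpp
// Sketch only: the packed-struct cast is the same trick the diff uses,
// valid on GCC/Clang; other compilers take the portable branch.
#include <cstdint>
#include <cstdio>

static inline uint32_t read_be32 (const uint8_t *p)
{
#if (defined(__GNUC__) || defined(__clang__)) && \
    defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  // Packed struct marks the load as possibly unaligned; bswap converts
  // big-endian to host order, typically in a single instruction.
  struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
  return __builtin_bswap32 (((const packed_uint32_t *) p)->v);
#else
  // Portable byte-by-byte fallback, as in the #else branch of the diff.
  return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16) |
         ((uint32_t) p[2] <<  8) |  (uint32_t) p[3];
#endif
}

int main ()
{
  const uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78 };
  printf ("0x%08x\n", (unsigned) read_be32 (buf));   // prints 0x12345678
  return 0;
}
```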
@ -234,17 +236,6 @@ struct
template <typename T> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, hb_deref (v).hash ())
template <typename T> constexpr uint32_t
impl (const hb::shared_ptr<T>& v, hb_priority<1>) const
{
return v.get () ? v.get ()->hash () : 0;
}
template <typename T> constexpr uint32_t
impl (const hb::unique_ptr<T>& v, hb_priority<1>) const
{
return v.get () ? v.get ()->hash () : 0;
}
template <typename T> constexpr auto
impl (const T& v, hb_priority<0>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v)))
@ -493,6 +484,17 @@ struct
}
HB_FUNCOBJ (hb_equal);
struct
{
template <typename T> void
operator () (T& a, T& b) const
{
using std::swap; // allow ADL
swap (a, b);
}
}
HB_FUNCOBJ (hb_swap);
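hb_swap, moved up here from later in the file, is the standard ADL-swap idiom: bring std::swap into scope, then make an unqualified call so a type's own swap overload wins when one exists. A self-contained illustration, assuming a hypothetical tracked_t type:

```cpp
// Sketch of the ADL-swap idiom; tracked_t and my_swap are illustrative.
#include <cstdio>
#include <utility>

struct tracked_t
{
  int v;
  friend void swap (tracked_t &a, tracked_t &b)
  {
    printf ("custom swap\n");
    std::swap (a.v, b.v);
  }
};

template <typename T> void my_swap (T &a, T &b)
{
  using std::swap;  // allow ADL to find a better match
  swap (a, b);
}

int main ()
{
  tracked_t x {1}, y {2};
  my_swap (x, y);   // prints "custom swap"; x.v == 2
  int i = 1, j = 2;
  my_swap (i, j);   // no overload found, falls back to std::swap
  return 0;
}
```

This is why hb_pair_t below grows a friend swap: containers swapped through hb_swap get the member-wise version instead of a copy-based one.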
template <typename T1, typename T2>
struct hb_pair_t
@ -505,7 +507,7 @@ struct hb_pair_t
hb_enable_if (std::is_default_constructible<U1>::value &&
std::is_default_constructible<U2>::value)>
hb_pair_t () : first (), second () {}
hb_pair_t (T1 a, T2 b) : first (a), second (b) {}
hb_pair_t (T1 a, T2 b) : first (std::forward<T1> (a)), second (std::forward<T2> (b)) {}
template <typename Q1, typename Q2,
hb_enable_if (hb_is_convertible (T1, Q1) &&
@ -522,10 +524,28 @@ struct hb_pair_t
bool operator > (const pair_t& o) const { return first > o.first || (first == o.first && second > o.second); }
bool operator <= (const pair_t& o) const { return !(*this > o); }
static int cmp (const void *pa, const void *pb)
{
pair_t *a = (pair_t *) pa;
pair_t *b = (pair_t *) pb;
if (a->first < b->first) return -1;
if (a->first > b->first) return +1;
if (a->second < b->second) return -1;
if (a->second > b->second) return +1;
return 0;
}
friend void swap (hb_pair_t& a, hb_pair_t& b)
{
hb_swap (a.first, b.first);
hb_swap (a.second, b.second);
}
T1 first;
T2 second;
};
#define hb_pair_t(T1,T2) hb_pair_t<T1, T2>
template <typename T1, typename T2> static inline hb_pair_t<T1, T2>
hb_pair (T1&& a, T2&& b) { return hb_pair_t<T1, T2> (a, b); }
@ -551,14 +571,14 @@ struct
{
template <typename T, typename T2> constexpr auto
operator () (T&& a, T2&& b) const HB_AUTO_RETURN
(a <= b ? std::forward<T> (a) : std::forward<T2> (b))
(a <= b ? a : b)
}
HB_FUNCOBJ (hb_min);
struct
{
template <typename T, typename T2> constexpr auto
operator () (T&& a, T2&& b) const HB_AUTO_RETURN
(a >= b ? std::forward<T> (a) : std::forward<T2> (b))
(a >= b ? a : b)
}
HB_FUNCOBJ (hb_max);
struct
@ -569,17 +589,6 @@ struct
}
HB_FUNCOBJ (hb_clamp);
struct
{
template <typename T> void
operator () (T& a, T& b) const
{
using std::swap; // allow ADL
swap (a, b);
}
}
HB_FUNCOBJ (hb_swap);
/*
* Bithacks.
*/
@ -848,19 +857,14 @@ hb_in_range (T u, T lo, T hi)
return (T)(u - lo) <= (T)(hi - lo);
}
template <typename T> static inline bool
hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2)
hb_in_ranges (T u, T lo1, T hi1)
{
return hb_in_range (u, lo1, hi1) || hb_in_range (u, lo2, hi2);
return hb_in_range (u, lo1, hi1);
}
template <typename T> static inline bool
hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3)
template <typename T, typename ...Ts> static inline bool
hb_in_ranges (T u, T lo1, T hi1, Ts... ds)
{
return hb_in_range (u, lo1, hi1) || hb_in_range (u, lo2, hi2) || hb_in_range (u, lo3, hi3);
}
template <typename T> static inline bool
hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3, T lo4, T hi4)
{
return hb_in_range (u, lo1, hi1) || hb_in_range (u, lo2, hi2) || hb_in_range (u, lo3, hi3) || hb_in_range (u, lo4, hi4);
return hb_in_range<T> (u, lo1, hi1) || hb_in_ranges<T> (u, ds...);
}
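The fixed two-, three-, and four-range overloads collapse into a single variadic template that peels one (lo, hi) pair off the parameter pack per recursive call, bottoming out at the single-pair overload. A sketch of the same shape with hypothetical names:

```cpp
// Sketch of the variadic recursion; in_range/in_ranges mirror the diff.
#include <cstdio>

template <typename T> static inline bool
in_range (T u, T lo, T hi)
{ return (T)(u - lo) <= (T)(hi - lo); }   // one unsigned compare per range

template <typename T> static inline bool
in_ranges (T u, T lo1, T hi1)             // base case: last pair
{ return in_range (u, lo1, hi1); }

template <typename T, typename ...Ts> static inline bool
in_ranges (T u, T lo1, T hi1, Ts... ds)   // peel one pair, recurse
{ return in_range<T> (u, lo1, hi1) || in_ranges<T> (u, ds...); }

int main ()
{
  // Any number of ranges now works with one definition.
  printf ("%d\n", in_ranges (7u, 1u, 3u, 5u, 9u));            // 1
  printf ("%d\n", in_ranges (4u, 1u, 3u, 5u, 9u, 11u, 13u));  // 0
  return 0;
}
```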
@ -868,10 +872,18 @@ hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3, T lo4, T hi4)
* Overflow checking.
*/
/* Consider __builtin_mul_overflow use here also */
static inline bool
hb_unsigned_mul_overflows (unsigned int count, unsigned int size)
hb_unsigned_mul_overflows (unsigned int count, unsigned int size, unsigned *result = nullptr)
{
#if (defined(__GNUC__) && (__GNUC__ >= 4)) || (defined(__clang__) && (__clang_major__ >= 8))
unsigned stack_result;
if (!result)
result = &stack_result;
return __builtin_mul_overflow (count, size, result);
#endif
if (result)
*result = count * size;
return (size > 0) && (count >= ((unsigned int) -1) / size);
}
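hb_unsigned_mul_overflows now takes an optional out-parameter and prefers __builtin_mul_overflow, which reports overflow and still stores the wrapped product. A minimal sketch, assuming a made-up mul_overflows helper and an exact division-based fallback:

```cpp
// Sketch of overflow-checked multiplication, modeled on the hunk above.
#include <cstdio>

static inline bool
mul_overflows (unsigned count, unsigned size, unsigned *result)
{
#if defined(__GNUC__) || defined(__clang__)
  // Builtin computes the full product and flags overflow in one step.
  return __builtin_mul_overflow (count, size, result);
#else
  *result = count * size;                 // wrapped product
  return size > 0 && count > ~0u / size;  // exact overflow test
#endif
}

int main ()
{
  unsigned n;
  if (!mul_overflows (1000u, 4u, &n))
    printf ("ok: %u bytes\n", n);         // ok: 4000 bytes
  if (mul_overflows (0x80000000u, 2u, &n))
    printf ("overflow detected\n");       // overflow detected
  return 0;
}
```

The typical caller is an allocation site: check the product before passing it to malloc-like APIs.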
@ -972,7 +984,7 @@ void hb_qsort(void *base, size_t nel, size_t width,
[void *arg]);
*/
#define SORT_R_SWAP(a,b,tmp) ((tmp) = (a), (a) = (b), (b) = (tmp))
#define SORT_R_SWAP(a,b,tmp) ((void) ((tmp) = (a)), (void) ((a) = (b)), (b) = (tmp))
/* swap a and b */
/* a and b must not be equal! */
@ -1163,9 +1175,12 @@ hb_qsort (void *base, size_t nel, size_t width,
}
template <typename T, typename T2, typename T3> static inline void
hb_stable_sort (T *array, unsigned int len, int(*compar)(const T2 *, const T2 *), T3 *array2)
template <typename T, typename T2, typename T3 = int> static inline void
hb_stable_sort (T *array, unsigned int len, int(*compar)(const T2 *, const T2 *), T3 *array2 = nullptr)
{
static_assert (hb_is_trivially_copy_assignable (T), "");
static_assert (hb_is_trivially_copy_assignable (T3), "");
for (unsigned int i = 1; i < len; i++)
{
unsigned int j = i;
@ -1188,12 +1203,6 @@ hb_stable_sort (T *array, unsigned int len, int(*compar)(const T2 *, const T2 *)
}
}
template <typename T> static inline void
hb_stable_sort (T *array, unsigned int len, int(*compar)(const T *, const T *))
{
hb_stable_sort (array, len, compar, (int *) nullptr);
}
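With T3 defaulted to int and array2 defaulted to nullptr, the separate two-argument wrapper above becomes unnecessary. The core algorithm is an insertion sort that mirrors every element move into an optional parallel array; a compact sketch under those assumptions (stable_sort here is illustrative, not the HarfBuzz template):

```cpp
// Sketch of an insertion sort dragging a parallel array along.
#include <cstdio>
#include <utility>

template <typename T, typename T3 = int>
static void stable_sort (T *array, unsigned len,
                         int (*compar) (const T *, const T *),
                         T3 *array2 = nullptr)
{
  for (unsigned i = 1; i < len; i++)
  {
    unsigned j = i;
    while (j && compar (&array[j - 1], &array[i]) > 0)
      j--;
    if (i == j) continue;
    // Rotate item i down into place at j, keeping array2 in lockstep.
    for (unsigned k = i; k > j; k--)
    {
      std::swap (array[k], array[k - 1]);
      if (array2) std::swap (array2[k], array2[k - 1]);
    }
  }
}

int main ()
{
  int keys[]  = {3, 1, 2};
  char tags[] = {'a', 'b', 'c'};
  stable_sort<int, char> (keys, 3,
                          +[] (const int *a, const int *b) { return *a - *b; },
                          tags);
  for (unsigned i = 0; i < 3; i++)
    printf ("%d:%c ", keys[i], tags[i]);  // 1:b 2:c 3:a
  return 0;
}
```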
static inline hb_bool_t
hb_codepoint_parse (const char *s, unsigned int len, int base, hb_codepoint_t *out)
{
@ -1321,47 +1330,4 @@ struct
HB_FUNCOBJ (hb_dec);
/* Compiler-assisted vectorization. */
/* Type behaving similar to vectorized vars defined using __attribute__((vector_size(...))),
* basically a fixed-size bitset. */
template <typename elt_t, unsigned int byte_size>
struct hb_vector_size_t
{
elt_t& operator [] (unsigned int i) { return v[i]; }
const elt_t& operator [] (unsigned int i) const { return v[i]; }
void clear (unsigned char v = 0) { memset (this, v, sizeof (*this)); }
template <typename Op>
hb_vector_size_t process (const Op& op) const
{
hb_vector_size_t r;
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
r.v[i] = op (v[i]);
return r;
}
template <typename Op>
hb_vector_size_t process (const Op& op, const hb_vector_size_t &o) const
{
hb_vector_size_t r;
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
r.v[i] = op (v[i], o.v[i]);
return r;
}
hb_vector_size_t operator | (const hb_vector_size_t &o) const
{ return process (hb_bitwise_or, o); }
hb_vector_size_t operator & (const hb_vector_size_t &o) const
{ return process (hb_bitwise_and, o); }
hb_vector_size_t operator ^ (const hb_vector_size_t &o) const
{ return process (hb_bitwise_xor, o); }
hb_vector_size_t operator ~ () const
{ return process (hb_bitwise_neg); }
private:
static_assert (0 == byte_size % sizeof (elt_t), "");
elt_t v[byte_size / sizeof (elt_t)];
};
#endif /* HB_ALGS_HH */


@ -100,10 +100,17 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
/* Ouch. The operator== compares the contents of the array. For range-based for loops,
* it's best if we can just compare arrayZ, though comparing contents is still fast,
* but also would require that Type has operator==. As such, we optimize this operator
* for range-based for loop and just compare arrayZ. No need to compare length, as we
* assume we're only compared to .end(). */
* for range-based for loop and just compare arrayZ and length.
*
* The above comment is now outdated, because we added separate begin()/end()
* methods to the objects that previously used hb_array_t for range-based loops. */
bool operator != (const hb_array_t& o) const
{ return arrayZ != o.arrayZ; }
{ return this->arrayZ != o.arrayZ || this->length != o.length; }
/* Faster range-based for loop without bounds-check. */
Type *begin () const { return arrayZ; }
Type *end () const { return arrayZ + length; }
/* Extra operators.
*/
@ -113,11 +120,11 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
HB_INTERNAL bool operator == (const hb_array_t &o) const;
uint32_t hash () const {
uint32_t hash () const
{
uint32_t current = 0;
for (unsigned int i = 0; i < this->length; i++) {
current = current * 31 + hb_hash (this->arrayZ[i]);
}
for (auto &v : *this)
current = current * 31 + hb_hash (v);
return current;
}
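Since hb_array_t now exposes raw-pointer begin()/end(), range-based for loops compile down to plain pointer comparisons and skip the iterator machinery entirely, which is also what the rewritten hash() relies on. A toy view with the same shape (array_view_t is a made-up name):

```cpp
// Sketch of a pointer-iterator array view, modeled on the hunk above.
#include <cstdio>

template <typename Type>
struct array_view_t
{
  Type *arrayZ;
  unsigned length;

  // Range-based for needs only these; no bounds checks, no operator!=.
  Type *begin () const { return arrayZ; }
  Type *end () const { return arrayZ + length; }

  unsigned hash () const
  {
    unsigned current = 0;
    for (auto &v : *this)           // uses the pointer iterators above
      current = current * 31 + (unsigned) v;
    return current;
  }
};

int main ()
{
  int data[] = {1, 2, 3};
  array_view_t<int> a {data, 3};
  for (int v : a) printf ("%d ", v);
  printf ("\nhash=%u\n", a.hash ());
  return 0;
}
```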
@ -185,23 +192,18 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
hb_sorted_array_t<Type> qsort (int (*cmp_)(const void*, const void*))
{
//static_assert (hb_enable_if (hb_is_trivially_copy_assignable(Type)), "");
if (likely (length))
hb_qsort (arrayZ, length, this->get_item_size (), cmp_);
return hb_sorted_array_t<Type> (*this);
}
hb_sorted_array_t<Type> qsort ()
{
//static_assert (hb_enable_if (hb_is_trivially_copy_assignable(Type)), "");
if (likely (length))
hb_qsort (arrayZ, length, this->get_item_size (), Type::cmp);
return hb_sorted_array_t<Type> (*this);
}
void qsort (unsigned int start, unsigned int end)
{
end = hb_min (end, length);
assert (start <= end);
if (likely (start < end))
hb_qsort (arrayZ + start, end - start, this->get_item_size (), Type::cmp);
}
/*
* Other methods.
@ -220,11 +222,8 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
if (end < start + 2)
return;
for (unsigned lhs = start, rhs = end - 1; lhs < rhs; lhs++, rhs--) {
Type temp = arrayZ[rhs];
arrayZ[rhs] = arrayZ[lhs];
arrayZ[lhs] = temp;
}
for (unsigned lhs = start, rhs = end - 1; lhs < rhs; lhs++, rhs--)
hb_swap (arrayZ[rhs], arrayZ[lhs]);
}
hb_array_t sub_array (unsigned int start_offset = 0, unsigned int *seg_count = nullptr /* IN/OUT */) const
@ -266,17 +265,31 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
void fini ()
{ hb_free ((void *) arrayZ); arrayZ = nullptr; length = 0; }
template <typename hb_serialize_context_t>
template <typename hb_serialize_context_t,
typename U = Type,
hb_enable_if (!(sizeof (U) < sizeof (long long) && hb_is_trivially_copy_assignable(hb_decay<Type>)))>
hb_array_t copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
auto* out = c->start_embed (arrayZ);
if (unlikely (!c->extend_size (out, get_size ()))) return_trace (hb_array_t ());
if (unlikely (!c->extend_size (out, get_size (), false))) return_trace (hb_array_t ());
for (unsigned i = 0; i < length; i++)
out[i] = arrayZ[i]; /* TODO: add version that calls c->copy() */
return_trace (hb_array_t (out, length));
}
template <typename hb_serialize_context_t,
typename U = Type,
hb_enable_if (sizeof (U) < sizeof (long long) && hb_is_trivially_copy_assignable(hb_decay<Type>))>
hb_array_t copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
auto* out = c->start_embed (arrayZ);
if (unlikely (!c->extend_size (out, get_size (), false))) return_trace (hb_array_t ());
hb_memcpy (out, arrayZ, get_size ());
return_trace (hb_array_t (out, length));
}
template <typename hb_sanitize_context_t>
bool sanitize (hb_sanitize_context_t *c) const
{ return c->check_array (arrayZ, length); }
@ -291,6 +304,9 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
unsigned int backwards_length = 0;
};
template <typename T> inline hb_array_t<T>
hb_array ()
{ return hb_array_t<T> (); }
template <typename T> inline hb_array_t<T>
hb_array (T *array, unsigned int length)
{ return hb_array_t<T> (array, length); }
template <typename T, unsigned int length_> inline hb_array_t<T>
@ -299,8 +315,8 @@ hb_array (T (&array_)[length_])
template <typename Type>
struct hb_sorted_array_t :
hb_iter_t<hb_sorted_array_t<Type>, Type&>,
hb_array_t<Type>
hb_array_t<Type>,
hb_iter_t<hb_sorted_array_t<Type>, Type&>
{
typedef hb_iter_t<hb_sorted_array_t, Type&> iter_base_t;
HB_ITER_USING (iter_base_t);
@ -320,17 +336,24 @@ struct hb_sorted_array_t :
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
constexpr hb_sorted_array_t (const hb_array_t<U> &o) :
hb_iter_t<hb_sorted_array_t, Type&> (),
hb_array_t<Type> (o) {}
hb_array_t<Type> (o),
hb_iter_t<hb_sorted_array_t, Type&> () {}
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
hb_sorted_array_t& operator = (const hb_array_t<U> &o)
{ hb_array_t<Type> (*this) = o; return *this; }
/* Iterator implementation. */
/* See comment in hb_array_of::operator != */
bool operator != (const hb_sorted_array_t& o) const
{ return this->arrayZ != o.arrayZ || this->length != o.length; }
/* Faster range-based for loop without bounds-check. */
Type *begin () const { return this->arrayZ; }
Type *end () const { return this->arrayZ + this->length; }
hb_sorted_array_t sub_array (unsigned int start_offset, unsigned int *seg_count /* IN/OUT */) const
{ return hb_sorted_array_t (((const hb_array_t<Type> *) (this))->sub_array (start_offset, seg_count)); }
hb_sorted_array_t sub_array (unsigned int start_offset, unsigned int seg_count) const
@ -344,7 +367,7 @@ struct hb_sorted_array_t :
unsigned int i;
return bfind (x, &i) ? &this->arrayZ[i] : not_found;
}
template <typename T, typename ...Ts>
template <typename T>
const Type *bsearch (const T &x, const Type *not_found = nullptr) const
{
unsigned int i;
@ -391,7 +414,7 @@ struct hb_sorted_array_t :
this->length,
sizeof (Type),
_hb_cmp_method<T, Type, Ts...>,
ds...);
std::forward<Ts> (ds)...);
}
};
template <typename T> inline hb_sorted_array_t<T>
@ -423,18 +446,42 @@ inline bool hb_array_t<const unsigned char>::operator == (const hb_array_t<const
return 0 == hb_memcmp (arrayZ, o.arrayZ, length);
}
/* Specialize hash() for byte arrays. */
template <>
inline uint32_t hb_array_t<const char>::hash () const {
inline uint32_t hb_array_t<const char>::hash () const
{
uint32_t current = 0;
for (unsigned int i = 0; i < this->length; i++)
current = current * 31 + (uint32_t) (this->arrayZ[i] * 2654435761u);
unsigned i = 0;
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__))
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
for (; i + 4 <= this->length; i += 4)
current = current * 31 + hb_hash ((uint32_t) ((packed_uint32_t *) &this->arrayZ[i])->v);
#endif
for (; i < this->length; i++)
current = current * 31 + hb_hash (this->arrayZ[i]);
return current;
}
template <>
inline uint32_t hb_array_t<const unsigned char>::hash () const {
inline uint32_t hb_array_t<const unsigned char>::hash () const
{
uint32_t current = 0;
for (unsigned int i = 0; i < this->length; i++)
current = current * 31 + (uint32_t) (this->arrayZ[i] * 2654435761u);
unsigned i = 0;
#if defined(__OPTIMIZE__) && !defined(HB_NO_PACKED) && \
((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__))
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
for (; i + 4 <= this->length; i += 4)
current = current * 31 + hb_hash ((uint32_t) ((packed_uint32_t *) &this->arrayZ[i])->v);
#endif
for (; i < this->length; i++)
current = current * 31 + hb_hash (this->arrayZ[i]);
return current;
}
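Both byte-array hash specializations above now consume four bytes per iteration under the same packed-struct guard used by BEInt, then finish byte-by-byte. A portable sketch of the word-at-a-time loop, using memcpy in place of the packed-struct cast; hash_bytes is a hypothetical name, and like the original fast path the result depends on host byte order:

```cpp
// Sketch of word-at-a-time hashing; memcpy stands in for the packed cast.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t hash_bytes (const char *p, unsigned length)
{
  uint32_t current = 0;
  unsigned i = 0;
  for (; i + 4 <= length; i += 4)
  {
    uint32_t word;
    memcpy (&word, p + i, 4);   // compilers lower this to one unaligned load
    current = current * 31 + word;
  }
  for (; i < length; i++)       // tail: remaining 0..3 bytes
    current = current * 31 + (uint32_t) (unsigned char) p[i];
  return current;
}

int main ()
{
  const char s[] = "harfbuzz";
  printf ("%u\n", hash_bytes (s, 8));
  return 0;
}
```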


@ -84,11 +84,11 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
#define _hb_memory_r_barrier() std::atomic_thread_fence(std::memory_order_acquire)
#define _hb_memory_w_barrier() std::atomic_thread_fence(std::memory_order_release)
#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), std::memory_order_acq_rel))
#define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std::memory_order_relaxed))
#define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std::memory_order_release))
#define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (std::memory_order_relaxed))
#define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<int> const *> (AI)->load (std::memory_order_acquire))
#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<std::decay<decltype (*(AI))>::type> *> (AI)->fetch_add ((V), std::memory_order_acq_rel))
#define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<std::decay<decltype (*(AI))>::type> *> (AI)->store ((V), std::memory_order_relaxed))
#define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<std::decay<decltype (*(AI))>::type> *> (AI)->store ((V), std::memory_order_release))
#define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<std::decay<decltype (*(AI))>::type> const *> (AI)->load (std::memory_order_relaxed))
#define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<std::decay<decltype (*(AI))>::type> const *> (AI)->load (std::memory_order_acquire))
#define hb_atomic_ptr_impl_set_relaxed(P, V) (reinterpret_cast<std::atomic<void*> *> (P)->store ((V), std::memory_order_relaxed))
#define hb_atomic_ptr_impl_get_relaxed(P) (reinterpret_cast<std::atomic<void*> const *> (P)->load (std::memory_order_relaxed))
@ -111,6 +111,24 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
#endif
/* This should never be disabled, even under HB_NO_MT,
* except that MSVC gives an internal compiler error, so it is disabled there.
*
* https://github.com/harfbuzz/harfbuzz/pull/4119
*/
#ifndef _hb_compiler_memory_r_barrier
#if defined(__ATOMIC_ACQUIRE) // gcc-like
#define _hb_compiler_memory_r_barrier() asm volatile("": : :"memory")
#elif !defined(_MSC_VER)
#include <atomic>
#define _hb_compiler_memory_r_barrier() std::atomic_signal_fence (std::memory_order_acquire)
#else
#define _hb_compiler_memory_r_barrier() do {} while (0)
#endif
#endif
#ifndef _hb_memory_r_barrier
#define _hb_memory_r_barrier() _hb_memory_barrier ()
#endif
@ -132,24 +150,47 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
#endif
#ifndef hb_atomic_int_impl_set
inline void hb_atomic_int_impl_set (int *AI, int v) { _hb_memory_w_barrier (); *AI = v; }
inline void hb_atomic_int_impl_set (short *AI, short v) { _hb_memory_w_barrier (); *AI = v; }
#endif
#ifndef hb_atomic_int_impl_get
inline int hb_atomic_int_impl_get (const int *AI) { int v = *AI; _hb_memory_r_barrier (); return v; }
inline short hb_atomic_int_impl_get (const short *AI) { short v = *AI; _hb_memory_r_barrier (); return v; }
#endif
#ifndef hb_atomic_ptr_impl_get
inline void *hb_atomic_ptr_impl_get (void ** const P) { void *v = *P; _hb_memory_r_barrier (); return v; }
#endif
struct hb_atomic_short_t
{
hb_atomic_short_t () = default;
constexpr hb_atomic_short_t (short v) : v (v) {}
hb_atomic_short_t& operator = (short v_) { set_relaxed (v_); return *this; }
operator short () const { return get_relaxed (); }
void set_relaxed (short v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
void set_release (short v_) { hb_atomic_int_impl_set (&v, v_); }
short get_relaxed () const { return hb_atomic_int_impl_get_relaxed (&v); }
short get_acquire () const { return hb_atomic_int_impl_get (&v); }
short inc () { return hb_atomic_int_impl_add (&v, 1); }
short dec () { return hb_atomic_int_impl_add (&v, -1); }
short v = 0;
};
struct hb_atomic_int_t
{
hb_atomic_int_t () = default;
constexpr hb_atomic_int_t (int v) : v (v) {}
hb_atomic_int_t& operator = (int v_) { set_relaxed (v_); return *this; }
operator int () const { return get_relaxed (); }
void set_relaxed (int v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
void set (int v_) { hb_atomic_int_impl_set (&v, v_); }
void set_release (int v_) { hb_atomic_int_impl_set (&v, v_); }
int get_relaxed () const { return hb_atomic_int_impl_get_relaxed (&v); }
int get () const { return hb_atomic_int_impl_get (&v); }
int get_acquire () const { return hb_atomic_int_impl_get (&v); }
int inc () { return hb_atomic_int_impl_add (&v, 1); }
int dec () { return hb_atomic_int_impl_add (&v, -1); }
@ -167,11 +208,11 @@ struct hb_atomic_ptr_t
void init (T* v_ = nullptr) { set_relaxed (v_); }
void set_relaxed (T* v_) { hb_atomic_ptr_impl_set_relaxed (&v, v_); }
T *get_relaxed () const { return (T *) hb_atomic_ptr_impl_get_relaxed (&v); }
T *get () const { return (T *) hb_atomic_ptr_impl_get ((void **) &v); }
T *get_acquire () const { return (T *) hb_atomic_ptr_impl_get ((void **) &v); }
bool cmpexch (const T *old, T *new_) const { return hb_atomic_ptr_impl_cmpexch ((void **) &v, (void *) old, (void *) new_); }
T * operator -> () const { return get (); }
template <typename C> operator C * () const { return get (); }
T * operator -> () const { return get_acquire (); }
template <typename C> operator C * () const { return get_acquire (); }
T *v = nullptr;
};
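The get/set entry points above are renamed to get_acquire/set_release, making the memory ordering explicit at every call site, and the impl macros now deduce the underlying type so the same macros serve both hb_atomic_short_t and hb_atomic_int_t. A minimal analogue written directly against std::atomic (atomic_int_t here is illustrative, not the HarfBuzz type):

```cpp
// Sketch of the relaxed/acquire/release split, modeled on hb_atomic_int_t.
#include <atomic>
#include <cstdio>

struct atomic_int_t
{
  void set_relaxed (int v_) { v.store (v_, std::memory_order_relaxed); }
  void set_release (int v_) { v.store (v_, std::memory_order_release); }
  int  get_relaxed () const { return v.load (std::memory_order_relaxed); }
  int  get_acquire () const { return v.load (std::memory_order_acquire); }
  int  inc () { return v.fetch_add ( 1, std::memory_order_acq_rel); }  // returns old value
  int  dec () { return v.fetch_add (-1, std::memory_order_acq_rel); }

  std::atomic<int> v {0};
};

int main ()
{
  atomic_int_t refcount;
  refcount.set_relaxed (1);   // safe before the object is shared
  refcount.inc ();            // returns 1, count is now 2
  if (refcount.dec () == 1)   // returns 2 here, so no destroy
    printf ("destroy\n");
  printf ("%d\n", refcount.get_acquire ());  // 1
  return 0;
}
```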


@ -30,32 +30,89 @@
#include "hb.hh"
/* Compiler-assisted vectorization. */
/* Type behaving similarly to vectorized vars defined using __attribute__((vector_size(...))),
* basically a fixed-size bitset. We can't use the compiler type because hb_vector_t cannot
* guarantee its alignment requirements. */
template <typename elt_t, unsigned int byte_size>
struct hb_vector_size_t
{
elt_t& operator [] (unsigned int i) { return v[i]; }
const elt_t& operator [] (unsigned int i) const { return v[i]; }
void init0 ()
{
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
v[i] = 0;
}
void init1 ()
{
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
v[i] = (elt_t) -1;
}
template <typename Op>
hb_vector_size_t process (const Op& op) const
{
hb_vector_size_t r;
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
r.v[i] = op (v[i]);
return r;
}
template <typename Op>
hb_vector_size_t process (const Op& op, const hb_vector_size_t &o) const
{
hb_vector_size_t r;
for (unsigned int i = 0; i < ARRAY_LENGTH (v); i++)
r.v[i] = op (v[i], o.v[i]);
return r;
}
hb_vector_size_t operator | (const hb_vector_size_t &o) const
{ return process (hb_bitwise_or, o); }
hb_vector_size_t operator & (const hb_vector_size_t &o) const
{ return process (hb_bitwise_and, o); }
hb_vector_size_t operator ^ (const hb_vector_size_t &o) const
{ return process (hb_bitwise_xor, o); }
hb_vector_size_t operator ~ () const
{ return process (hb_bitwise_neg); }
hb_array_t<const elt_t> iter () const
{ return hb_array (v); }
private:
static_assert (0 == byte_size % sizeof (elt_t), "");
elt_t v[byte_size / sizeof (elt_t)];
};
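hb_vector_size_t moves here from hb-algs.hh and gains init0/init1 and iter(). The core idea is element-wise operations over a small array of wide words, with no alignment constraints on the storage. A trimmed-down sketch of that pattern (vector_size_t is a stand-in name):

```cpp
// Sketch of a fixed-size bitset built from element-wise word operations.
#include <cstdio>

template <typename elt_t, unsigned byte_size>
struct vector_size_t
{
  static constexpr unsigned N = byte_size / sizeof (elt_t);

  // Apply a binary op word by word; the compiler can vectorize this loop.
  template <typename Op>
  vector_size_t process (const Op &op, const vector_size_t &o) const
  {
    vector_size_t r;
    for (unsigned i = 0; i < N; i++)
      r.v[i] = op (v[i], o.v[i]);
    return r;
  }

  vector_size_t operator | (const vector_size_t &o) const
  { return process ([] (elt_t a, elt_t b) { return a | b; }, o); }
  vector_size_t operator & (const vector_size_t &o) const
  { return process ([] (elt_t a, elt_t b) { return a & b; }, o); }

  elt_t v[N] = {};
};

int main ()
{
  vector_size_t<unsigned long long, 64> a, b;   // one 512-bit page
  a.v[0] = 0b1010; b.v[0] = 0b0110;
  printf ("%llu %llu\n", (a | b).v[0], (a & b).v[0]);  // 14 2
  return 0;
}
```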
struct hb_bit_page_t
{
void init0 () { v.clear (); }
void init1 () { v.clear (0xFF); }
void init0 () { v.init0 (); }
void init1 () { v.init1 (); }
constexpr unsigned len () const
static inline constexpr unsigned len ()
{ return ARRAY_LENGTH_CONST (v); }
bool is_empty () const
{
for (unsigned i = 0; i < len (); i++)
if (v[i])
return false;
return true;
return
+ hb_iter (v)
| hb_none
;
}
uint32_t hash () const
{
uint32_t h = 0;
for (unsigned i = 0; i < len (); i++)
h = h * 31 + hb_hash (v[i]);
return h;
return
+ hb_iter (v)
| hb_reduce ([] (uint32_t h, const elt_t &_) { return h * 31 + hb_hash (_); }, (uint32_t) 0u)
;
}
void add (hb_codepoint_t g) { elt (g) |= mask (g); }
void del (hb_codepoint_t g) { elt (g) &= ~mask (g); }
void set (hb_codepoint_t g, bool v) { if (v) add (g); else del (g); }
void set (hb_codepoint_t g, bool value) { if (value) add (g); else del (g); }
bool get (hb_codepoint_t g) const { return elt (g) & mask (g); }
void add_range (hb_codepoint_t a, hb_codepoint_t b)
@ -69,7 +126,7 @@ struct hb_bit_page_t
*la |= ~(mask (a) - 1);
la++;
memset (la, 0xff, (char *) lb - (char *) la);
hb_memset (la, 0xff, (char *) lb - (char *) la);
*lb |= ((mask (b) << 1) - 1);
}
@ -85,7 +142,7 @@ struct hb_bit_page_t
*la &= mask (a) - 1;
la++;
memset (la, 0, (char *) lb - (char *) la);
hb_memset (la, 0, (char *) lb - (char *) la);
*lb &= ~((mask (b) << 1) - 1);
}
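add_range/del_range above touch at most two partial words with masks and memset everything in between, so filling or clearing a span costs O(words), not O(bits). A standalone sketch of the same masking pattern over 64-bit words; add_range here is a simplified, hypothetical re-implementation with inclusive bounds, like the original:

```cpp
// Sketch of the mask-the-ends, memset-the-middle range fill.
#include <cstdint>
#include <cstdio>
#include <cstring>

static void add_range (uint64_t *bits, unsigned a, unsigned b)  // inclusive
{
  uint64_t *la = &bits[a / 64];
  uint64_t *lb = &bits[b / 64];
  uint64_t ma = 1ull << (a & 63);
  uint64_t mb = 1ull << (b & 63);
  if (la == lb)
    *la |= (mb - ma) | mb;          // bits a..b within a single word
  else
  {
    *la |= ~(ma - 1);               // bits a..63 of the first word
    la++;
    hb_memset (la, 0xff, (char *) lb - (char *) la);  /* whole middle words */
    *lb |= (mb << 1) - 1;           // bits 0..b of the last word
  }
}

// Portable stand-in for HarfBuzz's hb_memset wrapper.
static void *hb_memset (void *p, int c, size_t n) { return memset (p, c, n); }

int main ()
{
  uint64_t page[4] = {};
  add_range (page, 5, 130);
  printf ("%016llx %016llx %016llx\n",
          (unsigned long long) page[2],   // 0000000000000007
          (unsigned long long) page[1],   // ffffffffffffffff
          (unsigned long long) page[0]);  // ffffffffffffffe0
  return 0;
}
```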
@ -101,13 +158,13 @@ struct hb_bit_page_t
hb_codepoint_t *p,
unsigned int size) const
{
unsigned int start_v = start_value >> ELT_BITS_LOG_2;
unsigned int start_v = start_value / ELT_BITS;
unsigned int start_bit = start_value & ELT_MASK;
unsigned int count = 0;
for (unsigned i = start_v; i < len () && count < size; i++)
{
elt_t bits = v[i];
uint32_t v_base = base | (i << ELT_BITS_LOG_2);
uint32_t v_base = base | (i * ELT_BITS);
for (unsigned int j = start_bit; j < ELT_BITS && count < size; j++)
{
if ((elt_t(1) << j) & bits) {
@ -132,13 +189,13 @@ struct hb_bit_page_t
unsigned int size,
hb_codepoint_t *next_value) const
{
unsigned int start_v = start_value >> ELT_BITS_LOG_2;
unsigned int start_v = start_value / ELT_BITS;
unsigned int start_bit = start_value & ELT_MASK;
unsigned int count = 0;
for (unsigned i = start_v; i < len () && count < size; i++)
{
elt_t bits = v[i];
uint32_t v_offset = i << ELT_BITS_LOG_2;
uint32_t v_offset = i * ELT_BITS;
for (unsigned int j = start_bit; j < ELT_BITS && count < size; j++)
{
if ((elt_t(1) << j) & bits)
@ -161,7 +218,10 @@ struct hb_bit_page_t
bool is_equal (const hb_bit_page_t &other) const
{
return 0 == hb_memcmp (&v, &other.v, sizeof (v));
for (unsigned i = 0; i < len (); i++)
if (v[i] != other.v[i])
return false;
return true;
}
bool is_subset (const hb_bit_page_t &larger_page) const
{
@ -173,10 +233,10 @@ struct hb_bit_page_t
unsigned int get_population () const
{
unsigned int pop = 0;
for (unsigned int i = 0; i < len (); i++)
pop += hb_popcount (v[i]);
return pop;
return
+ hb_iter (v)
| hb_reduce ([] (unsigned pop, const elt_t &_) { return pop + hb_popcount (_); }, 0u)
;
}
bool next (hb_codepoint_t *codepoint) const
@ -250,10 +310,10 @@ struct hb_bit_page_t
static constexpr hb_codepoint_t INVALID = HB_SET_VALUE_INVALID;
typedef unsigned long long elt_t;
static constexpr unsigned PAGE_BITS = 512;
static_assert ((PAGE_BITS & ((PAGE_BITS) - 1)) == 0, "");
static constexpr unsigned PAGE_BITS_LOG_2 = 9;
static constexpr unsigned PAGE_BITS_LOG_2 = 9; // 512 bits
static constexpr unsigned PAGE_BITS = 1 << PAGE_BITS_LOG_2;
static_assert (1 << PAGE_BITS_LOG_2 == PAGE_BITS, "");
static_assert ((PAGE_BITS & ((PAGE_BITS) - 1)) == 0, "");
static constexpr unsigned PAGE_BITMASK = PAGE_BITS - 1;
static unsigned int elt_get_min (const elt_t &elt) { return hb_ctz (elt); }
@ -262,8 +322,6 @@ struct hb_bit_page_t
typedef hb_vector_size_t<elt_t, PAGE_BITS / 8> vector_t;
static constexpr unsigned ELT_BITS = sizeof (elt_t) * 8;
static constexpr unsigned ELT_BITS_LOG_2 = 6;
static_assert (1 << ELT_BITS_LOG_2 == ELT_BITS, "");
static constexpr unsigned ELT_MASK = ELT_BITS - 1;
static constexpr unsigned BITS = sizeof (vector_t) * 8;


@ -74,6 +74,11 @@ struct hb_bit_set_invertible_t
inverted = !inverted;
}
bool is_inverted () const
{
return inverted;
}
bool is_empty () const
{
hb_codepoint_t v = INVALID;
@ -123,10 +128,8 @@ struct hb_bit_set_invertible_t
bool get (hb_codepoint_t g) const { return s.get (g) ^ inverted; }
/* Has interface. */
static constexpr bool SENTINEL = false;
typedef bool value_t;
value_t operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
bool operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k]; }
/* Predicate. */
bool operator () (hb_codepoint_t k) const { return has (k); }
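The invertible set stores an ordinary set plus one inverted flag; membership is the stored bit XOR the flag, so complementing the whole set, even "every codepoint except these", is O(1) and costs no extra storage. A sketch with std::set standing in for the real bit set:

```cpp
// Sketch of the invert-by-flag trick; invertible_set_t is illustrative.
#include <cstdio>
#include <set>

struct invertible_set_t
{
  std::set<unsigned> s;     // stand-in for the underlying bit set
  bool inverted = false;

  void invert () { inverted = !inverted; }
  bool is_inverted () const { return inverted; }
  bool get (unsigned g) const { return (s.count (g) > 0) != inverted; }  // != is XOR on bools
  bool operator [] (unsigned g) const { return get (g); }
};

int main ()
{
  invertible_set_t everything_but_42;
  everything_but_42.s.insert (42);
  everything_but_42.invert ();   // O(1) complement
  printf ("%d %d\n",
          (int) everything_but_42[42],   // 0
          (int) everything_but_42[7]);   // 1
  return 0;
}
```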
