[gsubgpos] Optimize set-digest initialization

Previously we collected the set-digest once for each subtable, and
then a second time for the lookup as a whole.

Now we compute the lookup's digest simply by combining the digests of
its subtables.
Behdad Esfahbod 2022-11-16 14:39:25 -07:00
parent 20654cd889
commit a053b84cb9
2 changed files with 12 additions and 3 deletions
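
To make the change concrete before the diff, here is a minimal standalone sketch of the idea, using illustrative stand-in names (toy_digest_t, toy_subtable_t, lookup_digest) rather than HarfBuzz's real types: each subtable already carries a small bitmask digest of the glyphs it can affect, and the lookup-level digest is now just the bitwise OR of those per-subtable digests instead of a second coverage walk over the whole lookup.

#include <cstdint>
#include <vector>

// Illustrative stand-ins, not HarfBuzz's real types: a digest is a
// Bloom-filter-like 64-bit mask over glyph IDs; false positives are
// allowed, false negatives are not.
struct toy_digest_t
{
  uint64_t mask = 0;
  void add (uint32_t glyph) { mask |= 1ull << (glyph & 63u); }  // add one glyph
  void add (const toy_digest_t &o) { mask |= o.mask; }          // union with another digest
  bool may_have (uint32_t glyph) const { return mask & (1ull << (glyph & 63u)); }
};

struct toy_subtable_t { toy_digest_t digest; };

// Before: the lookup walked its own coverage again to build its digest.
// After this commit: just OR together the per-subtable digests that were
// already collected while accelerating the subtables.
static toy_digest_t lookup_digest (const std::vector<toy_subtable_t> &subtables)
{
  toy_digest_t d;
  for (const auto &s : subtables)
    d.add (s.digest);
  return d;
}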


@@ -3938,13 +3938,14 @@ struct hb_ot_layout_lookup_accelerator_t
   template <typename TLookup>
   void init (const TLookup &lookup)
   {
-    digest.init ();
-    lookup.collect_coverage (&digest);
     subtables.init ();
     OT::hb_accelerate_subtables_context_t c_accelerate_subtables (subtables);
     lookup.dispatch (&c_accelerate_subtables);
+    digest.init ();
+    for (auto& subtable : hb_iter (subtables))
+      digest.add (subtable.digest);
 #ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
     cache_user_idx = c_accelerate_subtables.cache_user_idx;
     for (unsigned i = 0; i < subtables.length; i++)


@@ -75,6 +75,8 @@ struct hb_set_digest_bits_pattern_t
   void init () { mask = 0; }
+  void add (const hb_set_digest_bits_pattern_t &o) { mask |= o.mask; }
   void add (hb_codepoint_t g) { mask |= mask_for (g); }
   bool add_range (hb_codepoint_t a, hb_codepoint_t b)

@@ -128,6 +130,12 @@ struct hb_set_digest_combiner_t
     tail.init ();
   }
+  void add (const hb_set_digest_combiner_t &o)
+  {
+    head.add (o.head);
+    tail.add (o.tail);
+  }
   void add (hb_codepoint_t g)
   {
     head.add (g);
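
For the combiner in the second hunk, a rough standalone sketch of what the new add (o) overload amounts to (the component types and may_have query below are simplified stand-ins, not the real hb_set_digest templates): the combiner keeps two digests built with different bit patterns, unioning two combiners unions each component pairwise, and a glyph is only a possible match if both components say so.

#include <cstdint>

// Stand-in for one digest instantiation: a 64-bit mask keyed by a
// configurable shift (the "bit pattern").
template <unsigned shift>
struct toy_pattern_digest_t
{
  uint64_t mask = 0;
  void add (uint32_t g) { mask |= 1ull << ((g >> shift) & 63u); }
  void add (const toy_pattern_digest_t &o) { mask |= o.mask; }  // union, as in the diff
  bool may_have (uint32_t g) const { return mask & (1ull << ((g >> shift) & 63u)); }
};

// Stand-in for the combiner: two component digests with different
// patterns; both must match for a possible hit.
struct toy_combiner_t
{
  toy_pattern_digest_t<0> head;
  toy_pattern_digest_t<4> tail;

  void add (uint32_t g) { head.add (g); tail.add (g); }
  void add (const toy_combiner_t &o)  // the new overload: pairwise union
  {
    head.add (o.head);
    tail.add (o.tail);
  }
  bool may_have (uint32_t g) const { return head.may_have (g) && tail.may_have (g); }
};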