Compare commits


19 Commits

Author SHA1 Message Date
Garret Rieger f2b9065246 [repacker] for ext promotion choose lookups from highest subtables per byte to lowest.
Attempts to roughly maximize the number of subtables that are non-ext.
2022-07-25 21:55:03 +00:00
Garret Rieger e4fd5ff727 [repacker] count subtable size in each group of consecutive layers for extension promotion decisions.
Enforce that the following groups are all <64k in size:
- LookupList + Lookups
- Lookups + SubTables
- SubTables + Descendants
2022-07-25 21:31:17 +00:00
Garret Rieger 0a5b69b255 [repacker] when calculating 16bit space size also consider ext lookup subtables. 2022-07-25 20:11:24 +00:00
Garret Rieger fa177b7f33 [repacker] include LookupList size when calculating size of 16bit space for ext promotion decisions. 2022-07-25 19:42:58 +00:00
Garret Rieger 156105b0f0 [repacker] Use extension promotion when repacking invoked via hb-subset-repacker (e.g. from fonttools). 2022-07-25 19:16:03 +00:00
Garret Rieger e6dda54c88 [repacker] remove temporary visibility overrides in gsubgpos. 2022-07-22 22:49:40 +00:00
Garret Rieger ffb0f7936f [repacker] add ext promotion test. 2022-07-22 22:36:04 +00:00
Garret Rieger 7ebc92f2e0 [repacker] todo. 2022-07-21 23:12:15 +00:00
Garret Rieger 8c6afa69d5 [repacker] Add basic version of the extension promotion selection algorithm. 2022-07-21 22:50:14 +00:00
Garret Rieger 514fcea0aa [repacker] fix GSTAR sanitize. 2022-07-21 21:57:17 +00:00
Garret Rieger 2f63f0c861 [repacker] support extension promotion in 24bit GSUB/GPOS. 2022-07-21 21:54:42 +00:00
Garret Rieger a8887b2740 [repacker] save buffer reference (not copy). 2022-07-21 21:50:23 +00:00
Garret Rieger ede7b74584 [repacker] add sanitization for GSUB/LookupList/Lookup during extension promotion. 2022-07-21 21:45:04 +00:00
Garret Rieger c61c05e431 [repacker] add make_extension_context_t. 2022-07-21 19:07:55 +00:00
Garret Rieger f2a0e69162 [repacker] size buffer correctly. 2022-07-21 18:36:20 +00:00
Garret Rieger d520a6d522 [repack] fix incorrect extension object bounds. 2022-07-20 18:17:29 +00:00
Garret Rieger 5babfda3f2 [repack] fix new node bounds. 2022-07-20 03:26:29 +00:00
Garret Rieger c20b85cfed [repack] WIP implement extension promotion mechanism. 2022-07-19 23:33:16 +00:00
Garret Rieger d9295d88a7 [repacker] WIP extension promotion implementation. 2022-07-19 21:50:13 +00:00
10 changed files with 678 additions and 13 deletions
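The heuristic described in the commits above can be seen in miniature in the following standalone sketch: order non-extension lookups by subtables per byte (densest first), keep accepting lookups while the three overlapping layer groups (LookupList + Lookups, Lookups + SubTables, SubTables + descendants) each stay under 64k, and promote everything after the cutoff. This is an editorial illustration only, with hypothetical names (LookupInfo, choose_promotions) and the assumption that per-lookup sizes are already known; the actual implementation is _promote_extensions_if_needed in the hb-repacker.hh hunk below.

// Editorial sketch, not part of the diff.
#include <algorithm>
#include <cstdio>
#include <vector>

struct LookupInfo
{
  unsigned index;
  size_t   lookup_size;      // bytes of the Lookup table itself
  size_t   subtables_size;   // bytes of its immediate subtables
  size_t   descendants_size; // bytes reachable below the subtables
  unsigned num_subtables;
};

static std::vector<unsigned> choose_promotions (std::vector<LookupInfo> lookups,
                                                size_t lookup_list_size)
{
  // Densest first (subtables per byte): keeping dense lookups non-extension
  // maximizes how many subtables avoid an 8 byte ExtensionFormat1 record.
  std::sort (lookups.begin (), lookups.end (),
             [] (const LookupInfo& a, const LookupInfo& b) {
               double da = (double) a.num_subtables /
                           (a.lookup_size + a.subtables_size + a.descendants_size);
               double db = (double) b.num_subtables /
                           (b.lookup_size + b.subtables_size + b.descendants_size);
               return da > db;
             });

  const size_t limit = 1 << 16;      // 16 bit offsets can only reach < 64k
  size_t l2_l3   = lookup_list_size; // LookupList + Lookups
  size_t l3_l4   = 0;                // Lookups + SubTables
  size_t l4_plus = 0;                // SubTables + descendants

  // Start as if every lookup were promoted: each subtable then costs an extra
  // 8 byte extension record in the lookup and subtable layers.
  for (const auto& l : lookups)
  {
    l3_l4   += l.num_subtables * 8;
    l4_plus += l.num_subtables * 8;
  }

  std::vector<unsigned> promoted;
  bool full = false;
  for (const auto& l : lookups)
  {
    if (!full)
    {
      l2_l3   += l.lookup_size;
      l3_l4   += l.lookup_size + l.subtables_size - l.num_subtables * 8;
      l4_plus += l.subtables_size + l.descendants_size;
      if (l2_l3 < limit && l3_l4 < limit && l4_plus < limit)
        continue;                    // still fits in 16 bit space; stays non-extension
      full = true;
    }
    promoted.push_back (l.index);    // everything past the cutoff gets promoted
  }
  return promoted;
}

int main ()
{
  // Three lookups whose subgraphs total well over 64k: only the densest stay non-extension.
  std::vector<LookupInfo> lookups = {
    {0, 20, 30000, 20000, 4},
    {1, 20, 40000, 10000, 2},
    {2, 20, 25000, 30000, 6},
  };
  for (unsigned idx : choose_promotions (lookups, 16))
    printf ("promote lookup %u to extension\n", idx);
  return 0;
}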

View File

@@ -348,6 +348,8 @@ HB_SUBSET_sources = \
hb-subset.hh \
hb-repacker.hh \
graph/graph.hh \
graph/gsubgpos-graph.hh \
graph/gsubgpos-graph.cc \
graph/serialize.hh \
$(NULL)

View File

@@ -24,6 +24,10 @@
* Google Author(s): Garret Rieger
*/
#include "hb-set.hh"
#include "hb-priority-queue.hh"
#include "hb-serialize.hh"
#ifndef GRAPH_GRAPH_HH
#define GRAPH_GRAPH_HH
@@ -107,6 +111,10 @@ struct graph_t
return priority >= 3;
}
size_t table_size () const {
return obj.tail - obj.head;
}
int64_t modified_distance (unsigned order) const
{
// TODO(garretrieger): once priority is high enough, should try
@@ -199,7 +207,7 @@ struct graph_t
return vertices_.length - 1;
}
const hb_serialize_context_t::object_t& object(unsigned i) const
const hb_serialize_context_t::object_t& object (unsigned i) const
{
return vertices_[i].obj;
}
@@ -310,6 +318,22 @@
}
}
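/*
 * Descriptive note (added editorially): returns the object index of the child
 * that the offset at 'offset' (a pointer into the object at node_idx) links to,
 * or (unsigned) -1 if the offset is out of bounds or has no recorded link.
 */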
unsigned index_for_offset(unsigned node_idx, const void* offset) const
{
const auto& node = object (node_idx);
if (offset < node.head || offset >= node.tail) return -1;
for (const auto& link : node.real_links)
{
if (offset != node.head + link.position)
continue;
return link.objidx;
}
return -1;
}
/*
* Assign unique space numbers to each connected subgraph of 24 bit and/or 32 bit offset(s).
* Currently, this is implemented specifically tailored to the structure of a GPOS/GSUB
@@ -317,6 +341,8 @@
*/
bool assign_spaces ()
{
update_parents ();
hb_set_t visited;
hb_set_t roots;
find_space_roots (visited, roots);
@@ -458,6 +484,21 @@
find_subgraph (link.objidx, subgraph);
}
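/*
 * Descriptive note (added editorially): total size in bytes of the subgraph
 * rooted at node_idx, following links up to max_depth levels deep. Nodes
 * already in 'subgraph' are skipped so shared children are counted only once.
 */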
size_t find_subgraph_size (unsigned node_idx, hb_set_t& subgraph, unsigned max_depth = -1)
{
if (subgraph.has (node_idx)) return 0;
subgraph.add (node_idx);
const auto& o = vertices_[node_idx].obj;
size_t size = o.tail - o.head;
if (max_depth == 0)
return size;
for (const auto& link : o.all_links ())
size += find_subgraph_size (link.objidx, subgraph, max_depth - 1);
return size;
}
/*
* Finds the topmost children of 32bit offsets in the subgraph starting
* at node_idx. Found indices are placed into 'found'.
@@ -581,6 +622,39 @@
return true;
}
/*
* Adds a new node to the graph, not connected to anything.
*/
unsigned new_node (char* head, char* tail)
{
positions_invalid = true;
distance_invalid = true;
auto* clone = vertices_.push ();
if (vertices_.in_error ()) {
return -1;
}
clone->obj.head = head;
clone->obj.tail = tail;
clone->distance = 0;
clone->space = 0;
unsigned clone_idx = vertices_.length - 2;
// The last object is the root of the graph, so swap the root back to the end.
// The root's obj idx does change, but since it is the root nothing else refers to it;
// all other obj idxs are unaffected.
hb_swap (vertices_[vertices_.length - 2], *clone);
// Since the root moved, update the parents arrays of all children on the root.
for (const auto& l : root ().obj.all_links ())
vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ());
return clone_idx;
}
/*
* Raises the sorting priority of all children.
*/

View File

@@ -0,0 +1,60 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "gsubgpos-graph.hh"
namespace graph {
make_extension_context_t::make_extension_context_t (hb_tag_t table_tag_,
graph_t& graph_,
hb_vector_t<char>& buffer_)
: table_tag (table_tag_),
graph (graph_),
buffer (buffer_),
lookup_list_index (0),
lookups ()
{
GSTAR* gstar = graph::GSTAR::graph_to_gstar (graph_);
if (gstar) {
gstar->find_lookups (graph, lookups);
lookup_list_index = gstar->get_lookup_list_index (graph_);
}
unsigned extension_size = OT::ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>::static_size;
buffer.alloc (num_non_ext_subtables () * extension_size);
}
unsigned make_extension_context_t::num_non_ext_subtables () {
unsigned count = 0;
for (auto l : lookups.values ())
{
if (l->is_extension (table_tag)) continue;
count += l->number_of_subtables ();
}
return count;
}
}

src/graph/gsubgpos-graph.hh (new file, 260 lines)
View File

@@ -0,0 +1,260 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "hb-ot-layout-gsubgpos.hh"
#include "OT/Layout/GSUB/ExtensionSubst.hh"
#ifndef GRAPH_GSUBGPOS_GRAPH_HH
#define GRAPH_GSUBGPOS_GRAPH_HH
namespace graph {
struct Lookup;
struct make_extension_context_t
{
hb_tag_t table_tag;
graph_t& graph;
hb_vector_t<char>& buffer;
unsigned lookup_list_index;
hb_hashmap_t<unsigned, graph::Lookup*> lookups;
HB_INTERNAL make_extension_context_t (hb_tag_t table_tag_,
graph_t& graph_,
hb_vector_t<char>& buffer_);
bool in_error () const
{
return buffer.in_error ();
}
private:
HB_INTERNAL unsigned num_non_ext_subtables ();
};
template<typename T>
struct ExtensionFormat1 : public OT::ExtensionFormat1<T>
{
void reset(unsigned type)
{
this->format = 1;
this->extensionLookupType = type;
this->extensionOffset = 0;
}
};
struct Lookup : public OT::Lookup
{
unsigned number_of_subtables () const
{
return subTable.len;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Lookup::min_size) return false;
return vertex_len >= this->get_size ();
}
bool is_extension (hb_tag_t table_tag) const
{
return lookupType == extension_type (table_tag);
}
bool make_extension (make_extension_context_t& c,
unsigned this_index)
{
unsigned type = lookupType;
unsigned ext_type = extension_type (c.table_tag);
if (!ext_type || is_extension (c.table_tag))
{
// NOOP
return true;
}
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Promoting lookup type %u (obj %u) to extension.",
type,
this_index);
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
if (!make_subtable_extension (c,
this_index,
subtable_index))
return false;
}
lookupType = ext_type;
return true;
}
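/*
 * Descriptive note (added editorially): allocates an 8 byte ExtensionFormat1
 * record in the shared buffer, wraps it in a new graph node, retargets the
 * lookup's link at the new node, and re-parents the subtable under it.
 */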
bool make_subtable_extension (make_extension_context_t& c,
unsigned lookup_index,
unsigned subtable_index)
{
unsigned type = lookupType;
unsigned extension_size = OT::ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>::static_size;
unsigned start = c.buffer.length;
unsigned end = start + extension_size;
if (!c.buffer.resize (c.buffer.length + extension_size))
return false;
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*) &c.buffer[start];
extension->reset (type);
unsigned ext_index = c.graph.new_node (&c.buffer.arrayZ[start],
&c.buffer.arrayZ[end]);
if (ext_index == (unsigned) -1) return false;
auto& lookup_vertex = c.graph.vertices_[lookup_index];
for (auto& l : lookup_vertex.obj.real_links.writer ())
{
if (l.objidx == subtable_index)
// Change lookup to point at the extension.
l.objidx = ext_index;
}
// Make extension point at the subtable.
auto& ext_vertex = c.graph.vertices_[ext_index];
auto& subtable_vertex = c.graph.vertices_[subtable_index];
auto* l = ext_vertex.obj.real_links.push ();
l->width = 4;
l->objidx = subtable_index;
l->is_signed = 0;
l->whence = 0;
l->position = 4;
l->bias = 0;
ext_vertex.parents.push (lookup_index);
subtable_vertex.remap_parent (lookup_index, ext_index);
return true;
}
private:
unsigned extension_type (hb_tag_t table_tag) const
{
switch (table_tag)
{
case HB_OT_TAG_GPOS: return 9;
case HB_OT_TAG_GSUB: return 7;
default: return 0;
}
}
};
template <typename T>
struct LookupList : public OT::LookupList<T>
{
bool sanitize (const graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::LookupList<T>::min_size) return false;
return vertex_len >= OT::LookupList<T>::item_size * this->len;
}
};
struct GSTAR : public OT::GSUBGPOS
{
static GSTAR* graph_to_gstar (graph_t& graph)
{
const auto& r = graph.root ();
GSTAR* gstar = (GSTAR*) r.obj.head;
if (!gstar->sanitize (r))
return nullptr;
return gstar;
}
const void* get_lookup_list_field_offset () const
{
switch (u.version.major) {
case 1: return u.version1.get_lookup_list_offset ();
#ifndef HB_NO_BORING_EXPANSION
case 2: return u.version2.get_lookup_list_offset ();
#endif
default: return 0;
}
}
bool sanitize (const graph_t::vertex_t& vertex)
{
int64_t len = vertex.obj.tail - vertex.obj.head;
if (len < OT::GSUBGPOS::min_size) return false;
return len >= get_size ();
}
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
switch (u.version.major) {
case 1: find_lookups<SmallTypes> (graph, lookups); break;
#ifndef HB_NO_BORING_EXPANSION
case 2: find_lookups<MediumTypes> (graph, lookups); break;
#endif
}
}
unsigned get_lookup_list_index (graph_t& graph)
{
return graph.index_for_offset (graph.root_idx (),
get_lookup_list_field_offset());
}
template<typename Types>
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
unsigned lookup_list_idx = get_lookup_list_index (graph);
const LookupList<Types>* lookupList =
(const LookupList<Types>*) graph.object (lookup_list_idx).head;
if (!lookupList->sanitize (graph.vertices_[lookup_list_idx]))
return;
for (unsigned i = 0; i < lookupList->len; i++)
{
unsigned lookup_idx = graph.index_for_offset (lookup_list_idx, &(lookupList->arrayZ[i]));
Lookup* lookup = (Lookup*) graph.object (lookup_idx).head;
if (!lookup->sanitize (graph.vertices_[lookup_idx])) continue;
lookups.set (lookup_idx, lookup);
}
}
};
}
#endif /* GRAPH_GSUBGPOS_GRAPH_HH */

View File

@@ -1337,7 +1337,7 @@ struct Lookup
return_trace (true);
}
private:
protected:
HBUINT16 lookupType; /* Different enumerations for GSUB and GPOS */
HBUINT16 lookupFlag; /* Lookup qualifiers */
Array16Of<Offset16>

View File

@@ -4011,6 +4011,11 @@ struct GSUBGPOSVersion1_2
(version.to_int () >= 0x00010001u ? featureVars.static_size : 0);
}
const typename Types::template OffsetTo<LookupList<Types>>* get_lookup_list_offset () const
{
return &lookupList;
}
template <typename TLookup>
bool sanitize (hb_sanitize_context_t *c) const
{

View File

@@ -29,10 +29,9 @@
#include "hb-open-type.hh"
#include "hb-map.hh"
#include "hb-priority-queue.hh"
#include "hb-serialize.hh"
#include "hb-vector.hh"
#include "graph/graph.hh"
#include "graph/gsubgpos-graph.hh"
#include "graph/serialize.hh"
using graph::graph_t;
@@ -42,6 +41,124 @@ using graph::graph_t;
* docs/repacker.md
*/
struct lookup_size_t
{
unsigned lookup_index;
size_t size;
unsigned num_subtables;
};
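// Comparator note (added editorially): orders lookups by subtables per byte,
// densest first, so the lookups that pack the most subtables into 16 bit space
// are kept non-extension; ties fall back to lookup index for a deterministic order.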
inline int compare_sizes (const void* a, const void* b)
{
lookup_size_t* size_a = (lookup_size_t*) a;
lookup_size_t* size_b = (lookup_size_t*) b;
double subtables_per_byte_a = (double) size_a->num_subtables / (double) size_a->size;
double subtables_per_byte_b = (double) size_b->num_subtables / (double) size_b->size;
if (subtables_per_byte_a == subtables_per_byte_b) {
return size_a->lookup_index - size_b->lookup_index;
}
double cmp = subtables_per_byte_b - subtables_per_byte_a;
if (cmp < 0) return -1;
if (cmp > 0) return 1;
return 0;
}
/*
* Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
* to extension lookups.
*/
static inline
bool _promote_extensions_if_needed (graph::make_extension_context_t& ext_context)
{
// Simple Algorithm (v1, current):
// 1. Calculate how many bytes each non-extension lookup consumes.
// 2. Select up to 64k of those to remain as non-extension (greedy, smallest first).
// 3. Promote the rest.
//
// Advanced Algorithm (v2, not implemented):
// 1. Perform connected component analysis using lookups as roots.
// 2. Compute size of each connected component.
// 3. Select up to 64k worth of connected components to remain as non-extensions.
// (greedy, smallest first)
// 4. Promote the rest.
// TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
// TODO(garretrieger): also support extension promotion during iterative resolution phase, then
// we can use a less conservative threshold here.
// TODO(grieger): skip this for the 24 bit case.
// TODO(grieger): sort by # subtables / size instead (high to low). Goal is to get as many subtables
// as possible into space 0 to minimize the number of extension subtables added.
// A fully optimal solution would require a knapsack-style dynamic programming
// solution.
if (!ext_context.lookups) return true;
hb_vector_t<lookup_size_t> lookup_sizes;
lookup_sizes.alloc (ext_context.lookups.get_population ());
for (unsigned lookup_index : ext_context.lookups.keys ())
{
const graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
hb_set_t visited;
lookup_sizes.push (lookup_size_t {
lookup_index,
ext_context.graph.find_subgraph_size (lookup_index, visited),
lookup->number_of_subtables (),
});
}
lookup_sizes.qsort (compare_sizes);
size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
size_t l2_l3_size = lookup_list_size; // Lookup List + Lookups
size_t l3_l4_size = 0; // Lookups + SubTables
size_t l4_plus_size = 0; // SubTables + their descendants
// Start by assuming all lookups use extension subtables; this size is removed later
// for any lookup that is decided to stay non-extension.
for (auto p : lookup_sizes)
{
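// Each promoted subtable costs one 8 byte ExtensionFormat1 record
// (format + extensionLookupType + 32 bit extensionOffset).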
unsigned subtables_size = p.num_subtables * 8;
l3_l4_size += subtables_size;
l4_plus_size += subtables_size;
}
bool layers_full = false;
for (auto p : lookup_sizes)
{
const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index);
if (lookup->is_extension (ext_context.table_tag))
// already an extension so size is counted by the loop above.
continue;
if (!layers_full)
{
size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
hb_set_t visited;
size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
size_t remaining_size = p.size - subtables_size - lookup_size;
l2_l3_size += lookup_size;
l3_l4_size += lookup_size + subtables_size;
l3_l4_size -= p.num_subtables * 8;
l4_plus_size += subtables_size + remaining_size;
if (l2_l3_size < (1 << 16)
&& l3_l4_size < (1 << 16)
&& l4_plus_size < (1 << 16)) continue; // this lookup fits within all layers groups
layers_full = true;
}
if (!ext_context.lookups.get(p.lookup_index)->make_extension (ext_context, p.lookup_index))
return false;
}
return true;
}
static inline
bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
graph_t& sorted_graph)
@@ -157,7 +274,8 @@ template<typename T>
inline hb_blob_t*
hb_resolve_overflows (const T& packed,
hb_tag_t table_tag,
unsigned max_rounds = 20) {
unsigned max_rounds = 20,
bool recalculate_extensions = false) {
graph_t sorted_graph (packed);
sorted_graph.sort_shortest_distance ();
@@ -167,10 +285,24 @@ hb_resolve_overflows (const T& packed,
return graph::serialize (sorted_graph);
}
hb_vector_t<char> extension_buffer; // Needs to live until serialization is done.
if ((table_tag == HB_OT_TAG_GPOS
|| table_tag == HB_OT_TAG_GSUB)
&& will_overflow)
{
if (recalculate_extensions)
{
graph::make_extension_context_t ext_context (table_tag, sorted_graph, extension_buffer);
if (ext_context.in_error ())
return nullptr;
if (!_promote_extensions_if_needed (ext_context)) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed.");
return nullptr;
}
}
DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
if (sorted_graph.assign_spaces ())
sorted_graph.sort_shortest_distance ();

View File

@@ -44,6 +44,9 @@ hb_blob_t* hb_subset_repack_or_fail (hb_object_t* hb_objects, unsigned num_hb_ob
for (unsigned i = 0 ; i < num_hb_objs ; i++)
packed.push (&(hb_objects[i]));
return hb_resolve_overflows (packed, HB_OT_TAG_GSUB);
return hb_resolve_overflows (packed,
HB_OT_TAG_GSUB,
20,
true);
}
#endif

View File

@@ -345,6 +345,7 @@ hb_subset_sources = files(
'hb-subset-plan.cc',
'hb-subset-plan.hh',
'hb-subset-repacker.cc',
'graph/gsubgpos-graph.cc',
'hb-subset.cc',
'hb-subset.hh',
)
@@ -565,7 +566,7 @@ if get_option('tests').enabled()
'test-number': ['test-number.cc', 'hb-number.cc'],
'test-ot-tag': ['hb-ot-tag.cc'],
'test-priority-queue': ['test-priority-queue.cc', 'hb-static.cc'],
'test-repacker': ['test-repacker.cc', 'hb-static.cc'],
'test-repacker': ['test-repacker.cc', 'hb-static.cc', 'graph/gsubgpos-graph.cc'],
'test-set': ['test-set.cc', 'hb-static.cc'],
'test-serialize': ['test-serialize.cc', 'hb-static.cc'],
'test-unicode-ranges': ['test-unicode-ranges.cc'],

View File

@@ -30,16 +30,22 @@
#include "hb-open-type.hh"
#include "graph/serialize.hh"
static void extend (const char* value,
unsigned len,
hb_serialize_context_t* c)
{
char* obj = c->allocate_size<char> (len);
memcpy (obj, value, len);
}
static void start_object(const char* tag,
unsigned len,
hb_serialize_context_t* c)
{
c->push ();
char* obj = c->allocate_size<char> (len);
strncpy (obj, tag, len);
extend (tag, len, c);
}
static unsigned add_object(const char* tag,
unsigned len,
hb_serialize_context_t* c)
@@ -76,7 +82,8 @@ static void add_wide_offset (unsigned id,
static void run_resolve_overflow_test (const char* name,
hb_serialize_context_t& overflowing,
hb_serialize_context_t& expected,
unsigned num_iterations = 0)
unsigned num_iterations = 0,
bool recalculate_extensions = false)
{
printf (">>> Testing overflowing resolution for %s\n",
name);
@@ -86,7 +93,9 @@ static void run_resolve_overflow_test (const char* name,
assert (overflowing.offset_overflow ());
hb_blob_t* out = hb_resolve_overflows (overflowing.object_graph (),
HB_TAG ('G', 'S', 'U', 'B'), num_iterations);
HB_TAG ('G', 'S', 'U', 'B'),
num_iterations,
recalculate_extensions);
assert (out);
hb_bytes_t result = out->as_bytes ();
@@ -95,11 +104,21 @@
hb_bytes_t expected_result = expected.copy_bytes ();
assert (result.length == expected_result.length);
bool equal = true;
for (unsigned i = 0; i < expected_result.length; i++)
{
assert (result[i] == expected_result[i]);
if (result[i] != expected_result[i])
{
equal = false;
uint8_t a = result[i];
uint8_t b = expected_result[i];
printf("%08u: %x != %x\n", i, a, b);
}
}
assert (equal);
expected_result.fini ();
hb_blob_destroy (out);
}
@@ -865,6 +884,89 @@ populate_serializer_with_24_and_32_bit_offsets (hb_serialize_context_t* c)
c->end_serialize();
}
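/*
 * Descriptive note (added editorially): builds a GSUB-like graph with 5 lookups,
 * each linking two 15,000 byte subtables, large enough that 16 bit offsets
 * overflow during packing. When num_extensions > 0 the last num_extensions
 * lookups are emitted as extension (type 7) lookups that reach their subtables
 * through 32 bit ExtensionFormat1 records.
 */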
static void
populate_serializer_with_extension_promotion (hb_serialize_context_t* c,
int num_extensions = 0)
{
constexpr int num_lookups = 5;
constexpr int num_subtables = num_lookups * 2;
unsigned int lookups[num_lookups];
unsigned int subtables[num_subtables];
unsigned int extensions[num_subtables];
std::string large_string(60000, 'a');
c->start_serialize<char> ();
for (int i = num_subtables - 1; i >= 0; i--)
subtables[i] = add_object(large_string.c_str (), 15000, c);
for (int i = num_subtables - 1;
i >= (num_lookups - num_extensions) * 2;
i--)
{
char ext[] = {
0, 1,
0, 5
};
unsigned ext_index = i - (num_lookups - num_extensions) * 2; // e.g. num_extensions = 3, i = 9: ext_index = 9 - 4 = 5
unsigned subtable_index = num_subtables - ext_index - 1;     // subtable_index = 10 - 5 - 1 = 4
start_object (ext, 4, c);
add_wide_offset (subtables[subtable_index], c);
extensions[i] = c->pop_pack (false);
}
for (int i = num_lookups - 1; i >= 0; i--)
{
bool is_ext = (i >= (num_lookups - num_extensions));
char lookup[] = {
0, is_ext ? (char) 7 : (char) 5, // type
0, 0, // flag
0, 2, // num subtables
};
start_object (lookup, 6, c);
if (is_ext) {
add_offset (extensions[i * 2], c);
add_offset (extensions[i * 2 + 1], c);
} else {
add_offset (subtables[i * 2], c);
add_offset (subtables[i * 2 + 1], c);
}
char filter[] = {0, 0};
extend (filter, 2, c);
lookups[i] = c->pop_pack (false);
}
char lookup_count[] = {0, num_lookups};
start_object ((char *) &lookup_count, 2, c);
for (int i = 0; i < num_lookups; i++)
add_offset (lookups[i], c);
unsigned lookup_list = c->pop_pack (false);
char gsub_header[] = {
0, 1, // major
0, 0, // minor
0, 0, // script list
0, 0, // feature list
};
start_object (gsub_header, 8, c);
add_offset (lookup_list, c);
c->pop_pack (false);
c->end_serialize();
}
static void test_sort_shortest ()
{
size_t buffer_size = 100;
@@ -1212,6 +1314,28 @@ static void test_resolve_mixed_overflows_via_isolation_spaces ()
hb_blob_destroy (out);
}
static void test_resolve_with_extension_promotion ()
{
size_t buffer_size = 200000;
void* buffer = malloc (buffer_size);
assert (buffer);
hb_serialize_context_t c (buffer, buffer_size);
populate_serializer_with_extension_promotion (&c);
void* expected_buffer = malloc (buffer_size);
assert (expected_buffer);
hb_serialize_context_t e (expected_buffer, buffer_size);
populate_serializer_with_extension_promotion (&e, 3);
run_resolve_overflow_test ("test_resolve_with_extension_promotion",
c,
e,
20,
true);
free (buffer);
free (expected_buffer);
}
static void test_resolve_overflows_via_splitting_spaces ()
{
size_t buffer_size = 160000;
@@ -1358,4 +1482,8 @@
test_duplicate_interior ();
test_virtual_link ();
test_shared_node_with_virtual_links ();
test_resolve_with_extension_promotion ();
// TODO(grieger): test with extensions already mixed in as well.
// TODO(grieger): test two layer ext promotion setup.
// TODO(grieger): test sorting by subtables per byte in ext. promotion.
}