[repacker] cleanup.
parent 674f0194a3
commit 2a5902ee50
@@ -46,39 +46,39 @@ struct lookup_size_t
   unsigned lookup_index;
   size_t size;
   unsigned num_subtables;
-};
 
-inline int compare_sizes (const void* a, const void* b)
-{
-  lookup_size_t* size_a = (lookup_size_t*) a;
-  lookup_size_t* size_b = (lookup_size_t*) b;
-
-  double subtables_per_byte_a = (double) size_a->num_subtables / (double) size_a->size;
-  double subtables_per_byte_b = (double) size_b->num_subtables / (double) size_b->size;
-
-  if (subtables_per_byte_a == subtables_per_byte_b) {
-    return size_b->lookup_index - size_a->lookup_index;
+  static int cmp (const void* a, const void* b)
+  {
+    return cmp ((const lookup_size_t*) a,
+                (const lookup_size_t*) b);
   }
 
-  double cmp = subtables_per_byte_b - subtables_per_byte_a;
-  if (cmp < 0) return -1;
-  if (cmp > 0) return 1;
-  return 0;
-}
+  static int cmp (const lookup_size_t* a, const lookup_size_t* b)
+  {
+    double subtables_per_byte_a = (double) a->num_subtables / (double) a->size;
+    double subtables_per_byte_b = (double) b->num_subtables / (double) b->size;
+    if (subtables_per_byte_a == subtables_per_byte_b) {
+      return b->lookup_index - a->lookup_index;
+    }
+
+    double cmp = subtables_per_byte_b - subtables_per_byte_a;
+    if (cmp < 0) return -1;
+    if (cmp > 0) return 1;
+    return 0;
+  }
+};
 
 static inline
 bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context)
 {
-  // Algorithm:
-  // 1. Scan all sub-tables and compute their sizes.
-  // 2. For any PairPos subtables that are >64kb:
-  //   a. split into two more pieces that are <64kb
-  //   b. De-dup subgraphs (optional) (can this be done during serialization?)
-
-  // TODO: Add a split subtables method at the lookup level, it could scan it's subtables and split as
-  //       needed.
-  // TODO: rename gsubgpos_graph_context_t to be more general.
-
+  // For each lookup this will check the size of subtables and split them as needed
+  // so that no subtable is at risk of overflowing. (where we support splitting for
+  // that subtable type).
+  //
+  // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup
+  //                pass after this processing is done. Not super necessary as splits are
+  //                only done where overflow is likely, so de-dup probably will get undone
+  //                later anyways.
   for (unsigned lookup_index : ext_context.lookups.keys ())
   {
     graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
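The first hunk folds the free compare_sizes () helper into lookup_size_t as two static cmp () overloads; the ordering itself is unchanged: lookups that pack more subtables into fewer bytes sort first, and ties go to the higher lookup index. A standalone sketch of that ordering in plain C++ (std::sort and made-up values, not HarfBuzz types):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Made-up element type mirroring lookup_size_t's fields.
struct entry_t { unsigned lookup_index; std::size_t size; unsigned num_subtables; };

int main ()
{
  std::vector<entry_t> entries = {
    {0, 2000, 1},   // 1 / 2000 = 0.0005 subtables per byte
    {1,  500, 2},   // 2 / 500  = 0.004
    {2, 1000, 4},   // 4 / 1000 = 0.004  (ties with lookup 1)
  };

  std::sort (entries.begin (), entries.end (),
             [] (const entry_t& a, const entry_t& b)
             {
               double da = (double) a.num_subtables / (double) a.size;
               double db = (double) b.num_subtables / (double) b.size;
               if (da != db) return da > db;            // densest lookups first
               return a.lookup_index > b.lookup_index;  // tie: higher index first
             });

  for (const entry_t& e : entries)
    std::printf ("lookup %u\n", e.lookup_index);        // prints 2, 1, 0
}

Folding the comparator into the element type also sets up the call-site change in the last hunk below.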
@@ -98,24 +98,20 @@ bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context
 {
   // Simple Algorithm (v1, current):
   // 1. Calculate how many bytes each non-extension lookup consumes.
-  // 2. Select up to 64k of those to remain as non-extension (greedy, smallest first).
+  // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first)
   // 3. Promote the rest.
   //
   // Advanced Algorithm (v2, not implemented):
   // 1. Perform connected component analysis using lookups as roots.
   // 2. Compute size of each connected component.
   // 3. Select up to 64k worth of connected components to remain as non-extensions.
-  //    (greedy, smallest first)
+  //    (greedy, highest subtables per byte first)
   // 4. Promote the rest.
 
   // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
   // TODO(garretrieger): also support extension promotion during iterative resolution phase, then
   //                     we can use a less conservative threshold here.
   // TODO(grieger): skip this for the 24 bit case.
-  // TODO(grieger): sort by # subtables / size instead (high to low). Goal is to get as many subtables
-  //                as possible into space 0 to minimize the number of extension subtables added.
-  //                A fully optimal solution will require a backpack problem dynamic programming type
-  //                solution.
   if (!ext_context.lookups) return true;
 
   hb_vector_t<lookup_size_t> lookup_sizes;
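The second hunk is comment-only: the greedy selection is now described as keeping the lookups with the highest subtables-per-byte ratio (matching the new comparator) rather than the smallest ones, and the now-obsolete TODO about that ordering is dropped; the "backpack problem" it mentioned is the knapsack problem. A rough sketch of the v1 greedy pass those comments describe, in plain C++ rather than the repacker's real types (the function name, entry type, and budget handling below are illustrative, not taken from this code):

#include <cstddef>
#include <vector>

// Made-up stand-in for lookup_size_t; the real code keeps these in an
// hb_vector_t sorted with lookup_size_t::cmp.
struct lookup_entry_t { unsigned lookup_index; std::size_t size; unsigned num_subtables; };

// Walk the lookups densest-first, keep them in space 0 until the budget is
// spent, and collect the rest for promotion to Extension lookups.
std::vector<unsigned>
pick_lookups_to_promote (const std::vector<lookup_entry_t>& sorted_densest_first,
                         std::size_t budget)   // roughly 64k minus the lookup list itself
{
  std::vector<unsigned> to_promote;
  std::size_t used = 0;
  for (const lookup_entry_t& l : sorted_densest_first)
  {
    if (used + l.size <= budget)
    {
      used += l.size;                          // stays non-extension in space 0
      continue;
    }
    to_promote.push_back (l.lookup_index);     // gets wrapped in an Extension subtable
  }
  return to_promote;
}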
@@ -132,7 +128,7 @@ bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context
     });
   }
 
-  lookup_sizes.qsort (compare_sizes);
+  lookup_sizes.qsort ();
 
   size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
   size_t l2_l3_size = lookup_list_size; // Lookup List + Lookups
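The last hunk is the visible payoff of the struct change: lookup_sizes.qsort (compare_sizes) becomes lookup_sizes.qsort (), since the comparator now lives on the element type and hb_vector_t's parameterless qsort appears to fall back to the element's static cmp. A minimal stand-in for that pattern (an assumption about the container's behavior, not the real hb_vector_t):

#include <cstdlib>

// Toy container whose parameterless qsort () defers to the element type's
// static cmp, mimicking the call-site shape seen in the hunk above.
template <typename Type>
struct tiny_vector_t
{
  Type* arrayZ;
  unsigned length;

  void qsort () { std::qsort (arrayZ, length, sizeof (Type), Type::cmp); }
};

struct item_t
{
  int key;

  static int cmp (const void* a, const void* b)
  { return ((const item_t*) a)->key - ((const item_t*) b)->key; }
};

int main ()
{
  item_t storage[] = { {3}, {1}, {2} };
  tiny_vector_t<item_t> v { storage, 3 };
  v.qsort ();                           // no comparator at the call site
  return storage[0].key == 1 ? 0 : 1;   // sorted ascending: 1, 2, 3
}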