/*
 * Copyright © 2020 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#ifndef HB_REPACKER_HH
#define HB_REPACKER_HH

#include "hb-open-type.hh"
#include "hb-map.hh"
#include "hb-vector.hh"
#include "graph/graph.hh"
#include "graph/gsubgpos-graph.hh"
#include "graph/serialize.hh"

using graph::graph_t;

/*
 * For a detailed writeup on the overflow resolution algorithm see:
 * docs/repacker.md
 */

struct lookup_size_t
{
  unsigned lookup_index;
  size_t size;
  unsigned num_subtables;
};

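/* qsort comparator for lookup_size_t entries: orders lookups by subtables-per-byte,
 * highest ratio first, so that the lookups which pack the most subtables into the
 * fewest bytes get the first chance to remain non-extension. Ties are broken by
 * lookup index, highest first. */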
inline int compare_sizes (const void* a, const void* b)
{
  lookup_size_t* size_a = (lookup_size_t*) a;
  lookup_size_t* size_b = (lookup_size_t*) b;

  double subtables_per_byte_a = (double) size_a->num_subtables / (double) size_a->size;
  double subtables_per_byte_b = (double) size_b->num_subtables / (double) size_b->size;

  if (subtables_per_byte_a == subtables_per_byte_b) {
    return size_b->lookup_index - size_a->lookup_index;
  }

  double cmp = subtables_per_byte_b - subtables_per_byte_a;
  if (cmp < 0) return -1;
  if (cmp > 0) return 1;
  return 0;
}

/*
 * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
 * to extension lookups.
 */
static inline
bool _promote_extensions_if_needed (graph::make_extension_context_t& ext_context)
{
  // Simple Algorithm (v1, current):
  // 1. Calculate how many bytes each non-extension lookup consumes.
  // 2. Select up to 64k of those to remain as non-extension (greedy, smallest first).
  // 3. Promote the rest.
  //
  // Advanced Algorithm (v2, not implemented):
  // 1. Perform connected component analysis using lookups as roots.
  // 2. Compute the size of each connected component.
  // 3. Select up to 64k worth of connected components to remain as non-extensions.
  //    (greedy, smallest first)
  // 4. Promote the rest.

  // TODO(garretrieger): support extension demotion, then consider all lookups. Requires the advanced algorithm.
  // TODO(garretrieger): also support extension promotion during the iterative resolution phase; then
  //                     a less conservative threshold can be used here.
  // TODO(grieger): skip this for the 24 bit case.
  // TODO(grieger): sort by # subtables / size instead (high to low). The goal is to get as many subtables
  //                as possible into space 0 to minimize the number of extension subtables added.
  //                A fully optimal solution would require a knapsack-style dynamic programming
  //                solution.

  if (!ext_context.lookups) return true;

  hb_vector_t<lookup_size_t> lookup_sizes;
  lookup_sizes.alloc (ext_context.lookups.get_population ());

  for (unsigned lookup_index : ext_context.lookups.keys ())
  {
    const graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
    hb_set_t visited;
    lookup_sizes.push (lookup_size_t {
      lookup_index,
      ext_context.graph.find_subgraph_size (lookup_index, visited),
      lookup->number_of_subtables (),
    });
  }

  lookup_sizes.qsort (compare_sizes);

  size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
  size_t l2_l3_size = lookup_list_size; // Lookup List + Lookups
  size_t l3_l4_size = 0;                // Lookups + SubTables
  size_t l4_plus_size = 0;              // SubTables + their descendants

  // Start by assuming all lookups are using extension subtables. This size will be removed later
  // if it's decided not to make a lookup an extension.
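  // Each promoted subtable is reached through an 8 byte extension subtable
  // (format 1: format, extension lookup type, and a 32 bit offset), hence the
  // 8 bytes per subtable accounted for below.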
  for (auto p : lookup_sizes)
  {
    unsigned subtables_size = p.num_subtables * 8;
    l3_l4_size += subtables_size;
    l4_plus_size += subtables_size;
  }

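  // Walk the lookups from best to worst subtables-per-byte ratio. Each lookup stays
  // non-extension for as long as every affected offset layer still fits under the
  // 64k (1 << 16) limit of a 16 bit offset; once any layer would overflow, that
  // lookup and all remaining non-extension lookups are promoted.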
  bool layers_full = false;
  for (auto p : lookup_sizes)
  {
    const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index);
    if (lookup->is_extension (ext_context.table_tag))
      // Already an extension, so its size is counted by the loop above.
      continue;

    if (!layers_full)
    {
      size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
      hb_set_t visited;
      size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
      size_t remaining_size = p.size - subtables_size - lookup_size;

      l2_l3_size += lookup_size;
      l3_l4_size += lookup_size + subtables_size;
      l3_l4_size -= p.num_subtables * 8;
      l4_plus_size += subtables_size + remaining_size;

      if (l2_l3_size < (1 << 16)
          && l3_l4_size < (1 << 16)
          && l4_plus_size < (1 << 16)) continue; // This lookup fits within all layer groups.

      layers_full = true;
    }

    if (!ext_context.lookups.get(p.lookup_index)->make_extension (ext_context, p.lookup_index))
      return false;
  }

  return true;
}

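/*
 * Attempts to resolve overflows by subdividing 32 bit spaces: if an overflow
 * occurs inside a space that holds more than one subgraph root, some of those
 * roots (at most half at a time) are isolated and moved into a new space.
 * Returns true if any roots were moved.
 */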
static inline
bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
                               graph_t& sorted_graph)
{
  unsigned space = 0;
  hb_set_t roots_to_isolate;

  for (int i = overflows.length - 1; i >= 0; i--)
  {
    const graph::overflow_record_t& r = overflows[i];

    unsigned root;
    unsigned overflow_space = sorted_graph.space_for (r.parent, &root);
    if (!overflow_space) continue;
    if (sorted_graph.num_roots_for_space (overflow_space) <= 1) continue;

    if (!space) {
      space = overflow_space;
    }

    if (space == overflow_space)
      roots_to_isolate.add(root);
  }

  if (!roots_to_isolate) return false;

  unsigned maximum_to_move = hb_max ((sorted_graph.num_roots_for_space (space) / 2u), 1u);
  if (roots_to_isolate.get_population () > maximum_to_move) {
    // Only move at most half of the roots in a space at a time.
    unsigned extra = roots_to_isolate.get_population () - maximum_to_move;
    while (extra--) {
      unsigned root = HB_SET_VALUE_INVALID;
      roots_to_isolate.previous (&root);
      roots_to_isolate.del (root);
    }
  }

  DEBUG_MSG (SUBSET_REPACK, nullptr,
             "Overflow in space %d (%d roots). Moving %d roots to space %d.",
             space,
             sorted_graph.num_roots_for_space (space),
             roots_to_isolate.get_population (),
             sorted_graph.next_space ());

  sorted_graph.isolate_subgraph (roots_to_isolate);
  sorted_graph.move_to_new_space (roots_to_isolate);

  return true;
}

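/*
 * Attempts to resolve the reported overflows one at a time, furthest first,
 * either by duplicating a shared child object or by bumping the packing
 * priority of an overflowing parent's children so they are placed closer to
 * it. Returns true if at least one resolution was attempted.
 */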
static inline
bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows,
                         hb_set_t& priority_bumped_parents,
                         graph_t& sorted_graph)
{
  bool resolution_attempted = false;

  // Try resolving the furthest overflows first.
  for (int i = overflows.length - 1; i >= 0; i--)
  {
    const graph::overflow_record_t& r = overflows[i];
    const auto& child = sorted_graph.vertices_[r.child];
    if (child.is_shared ())
    {
      // The child object is shared; we may be able to eliminate the overflow
      // by duplicating it.
      if (!sorted_graph.duplicate (r.parent, r.child)) continue;
      return true;
    }

    if (child.is_leaf () && !priority_bumped_parents.has (r.parent))
    {
      // This object is too far from its parent, attempt to move it closer.
      //
      // TODO(garretrieger): initially limiting this to leaves since they can be
      //                     moved closer with fewer consequences. However, this can
      //                     likely be used for non-leaves as well.
      // TODO(garretrieger): also try lowering the priority of the parent. Make it
      //                     get placed further up in the ordering, closer to its children.
      //                     This is probably preferable if the total size of the parent object
      //                     is less than the total size of the children (and the parent can be moved),
      //                     since in that case moving the parent will cause a smaller increase in
      //                     the length of other offsets.
      if (sorted_graph.raise_childrens_priority (r.parent)) {
        priority_bumped_parents.add (r.parent);
        resolution_attempted = true;
      }
      continue;
    }

    // TODO(garretrieger): add additional offset resolution strategies
    // - Promotion to extension lookups.
    // - Table splitting.
  }

  return resolution_attempted;
}

/*
 * Attempts to modify the topological sorting of the provided object graph to
 * eliminate offset overflows in the links between objects of the graph. If a
 * non-overflowing ordering is found, the updated graph is serialized and
 * returned as a blob.
 *
 * If necessary the structure of the graph may be modified in ways that do not
 * affect the functionality of the graph. For example shared objects may be
 * duplicated.
 *
 * For a detailed writeup describing how the algorithm operates see:
 * docs/repacker.md
 */
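/*
 * Illustrative call, assuming 'c' is an hb_serialize_context_t holding the
 * packed table and that an object_graph () accessor is available (the names
 * shown here are for illustration only):
 *
 *   hb_blob_t* blob = hb_resolve_overflows (c.object_graph (),
 *                                           HB_OT_TAG_GPOS,
 *                                           20,    // max_rounds
 *                                           true); // recalculate_extensions
 */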
template<typename T>
inline hb_blob_t*
hb_resolve_overflows (const T& packed,
                      hb_tag_t table_tag,
                      unsigned max_rounds = 20,
                      bool recalculate_extensions = false) {
  graph_t sorted_graph (packed);
  sorted_graph.sort_shortest_distance ();

  bool will_overflow = graph::will_overflow (sorted_graph);
  if (!will_overflow)
  {
    return graph::serialize (sorted_graph);
  }

  hb_vector_t<char> extension_buffer; // Needs to live until serialization is done.

  if ((table_tag == HB_OT_TAG_GPOS
       || table_tag == HB_OT_TAG_GSUB)
      && will_overflow)
  {
    if (recalculate_extensions)
    {
      graph::make_extension_context_t ext_context (table_tag, sorted_graph, extension_buffer);
      if (ext_context.in_error ())
        return nullptr;

      if (!_promote_extensions_if_needed (ext_context)) {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed.");
        return nullptr;
      }
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
    if (sorted_graph.assign_spaces ())
      sorted_graph.sort_shortest_distance ();
  }

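  // Main resolution loop: each round either isolates overflowing 32 bit
  // subgraphs into their own space (not counted against max_rounds) or applies
  // per-overflow resolutions, then re-sorts the graph before checking for
  // remaining overflows.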
  unsigned round = 0;
  hb_vector_t<graph::overflow_record_t> overflows;
  // TODO(garretrieger): select a good limit for max rounds.
  while (!sorted_graph.in_error ()
         && graph::will_overflow (sorted_graph, &overflows)
         && round < max_rounds) {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %d ===", round);
    print_overflows (sorted_graph, overflows);

    hb_set_t priority_bumped_parents;

    if (!_try_isolating_subgraphs (overflows, sorted_graph))
    {
      // Don't count space isolation towards round limit. Only increment
      // round counter if space isolation made no changes.
      round++;
      if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph))
      {
        DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :(");
        break;
      }
    }

    sorted_graph.sort_shortest_distance ();
  }

  if (sorted_graph.in_error ())
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state.");
    return nullptr;
  }

  if (graph::will_overflow (sorted_graph))
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr, "Offset overflow resolution failed.");
    return nullptr;
  }

  return graph::serialize (sorted_graph);
}

#endif /* HB_REPACKER_HH */