diff --git a/src/Makefile.sources b/src/Makefile.sources
index 64a6274dd..04b9cbe22 100644
--- a/src/Makefile.sources
+++ b/src/Makefile.sources
@@ -20,6 +20,7 @@ HB_BASE_sources = \
 	hb-atomic.hh \
 	hb-bimap.hh \
 	hb-bit-page.hh \
+	hb-bit-set.hh \
 	hb-blob.cc \
 	hb-blob.hh \
 	hb-buffer-serialize.cc \
diff --git a/src/hb-bit-set.hh b/src/hb-bit-set.hh
new file mode 100644
index 000000000..8e386b91e
--- /dev/null
+++ b/src/hb-bit-set.hh
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2012,2017 Google, Inc.
+ * Copyright © 2021 Behdad Esfahbod
+ *
+ * This is part of HarfBuzz, a text shaping library.
+ *
+ * Permission is hereby granted, without written agreement and without
+ * license or royalty fees, to use, copy, modify, and distribute this
+ * software and its documentation for any purpose, provided that the
+ * above copyright notice and the following two paragraphs appear in
+ * all copies of this software.
+ *
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
+ * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ *
+ * Google Author(s): Behdad Esfahbod
+ */
+
+#ifndef HB_BIT_SET_HH
+#define HB_BIT_SET_HH
+
+#include "hb.hh"
+#include "hb-bit-page.hh"
+#include "hb-machinery.hh"
+
+
+struct hb_bit_set_t
+{
+  hb_bit_set_t ()
+  {
+    init ();
+  }
+  ~hb_bit_set_t ()
+  {
+    fini ();
+  }
+
+  void init ()
+  {
+    successful = true;
+    population = 0;
+    last_page_lookup = 0;
+    page_map.init ();
+    pages.init ();
+  }
+  void fini ()
+  {
+    page_map.fini ();
+    pages.fini ();
+  }
+
+  hb_bit_set_t (const hb_bit_set_t& other) : hb_bit_set_t () { set (other); }
+  void operator= (const hb_bit_set_t& other) { set (other); }
+  // TODO Add move constructor/assign
+  // TODO Add constructor for Iterator; with specialization for (sorted) vector / array?
+
+  /* TODO Keep a freelist so we can release pages that are completely zeroed. At that
+   * point maybe also use a sentinel value for "all-1" pages?
+   */
+
+  using page_t = hb_bit_page_t;
+  struct page_map_t
+  {
+    int cmp (const page_map_t &o) const { return cmp (o.major); }
+    int cmp (uint32_t o_major) const { return (int) o_major - (int) major; }
+
+    uint32_t major;
+    uint32_t index;
+  };
+
+  bool successful; /* Allocations successful */
+  mutable unsigned int population;
+  mutable unsigned int last_page_lookup;
+  hb_sorted_vector_t<page_map_t> page_map;
+  hb_vector_t<page_t> pages;
+
+  void err () { if (successful) successful = false; } /* TODO Remove */
+  bool in_error () const { return !successful; }
+
+  bool resize (unsigned int count)
+  {
+    if (unlikely (count > pages.length && !successful)) return false;
+    if (!pages.resize (count) || !page_map.resize (count))
+    {
+      pages.resize (page_map.length);
+      successful = false;
+      return false;
+    }
+    return true;
+  }
+
+  void reset ()
+  {
+    successful = true;
+    clear ();
+  }
+
+  void clear ()
+  {
+    if (resize (0))
+      population = 0;
+  }
+  bool is_empty () const
+  {
+    unsigned int count = pages.length;
+    for (unsigned int i = 0; i < count; i++)
+      if (!pages[i].is_empty ())
+        return false;
+    return true;
+  }
+  explicit operator bool () const { return !is_empty (); }
+
+  private:
+  void dirty () { population = UINT_MAX; }
+  public:
+
+  void add (hb_codepoint_t g)
+  {
+    if (unlikely (!successful)) return;
+    if (unlikely (g == INVALID)) return;
+    dirty ();
+    page_t *page = page_for_insert (g); if (unlikely (!page)) return;
+    page->add (g);
+  }
+  bool add_range (hb_codepoint_t a, hb_codepoint_t b)
+  {
+    if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */
+    if (unlikely (a > b || a == INVALID || b == INVALID)) return false;
+    dirty ();
+    unsigned int ma = get_major (a);
+    unsigned int mb = get_major (b);
+    if (ma == mb)
+    {
+      page_t *page = page_for_insert (a); if (unlikely (!page)) return false;
+      page->add_range (a, b);
+    }
+    else
+    {
+      page_t *page = page_for_insert (a); if (unlikely (!page)) return false;
+      page->add_range (a, major_start (ma + 1) - 1);
+
+      for (unsigned int m = ma + 1; m < mb; m++)
+      {
+        page = page_for_insert (major_start (m)); if (unlikely (!page)) return false;
+        page->init1 ();
+      }
+
+      page = page_for_insert (b); if (unlikely (!page)) return false;
+      page->add_range (major_start (mb), b);
+    }
+    return true;
+  }
+
+  template <typename T>
+  void add_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
+  {
+    if (unlikely (!successful)) return;
+    if (!count) return;
+    dirty ();
+    hb_codepoint_t g = *array;
+    while (count)
+    {
+      unsigned int m = get_major (g);
+      page_t *page = page_for_insert (g); if (unlikely (!page)) return;
+      unsigned int start = major_start (m);
+      unsigned int end = major_start (m + 1);
+      do
+      {
+        page->add (g);
+
+        array = &StructAtOffsetUnaligned<T> (array, stride);
+        count--;
+      }
+      while (count && (g = *array, start <= g && g < end));
+    }
+  }
+  template <typename T>
+  void add_array (const hb_array_t<const T>& arr) { add_array (&arr, arr.len ()); }
+
+  /* Might return false if array looks unsorted.
+   * Used for faster rejection of corrupt data.
+   */
+  template <typename T>
+  bool add_sorted_array (const T *array, unsigned int count, unsigned int stride=sizeof(T))
+  {
+    if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */
+    if (!count) return true;
+    dirty ();
+    hb_codepoint_t g = *array;
+    hb_codepoint_t last_g = g;
+    while (count)
+    {
+      unsigned int m = get_major (g);
+      page_t *page = page_for_insert (g); if (unlikely (!page)) return false;
+      unsigned int end = major_start (m + 1);
+      do
+      {
+        /* If we try harder we can change the following comparison to <=;
+         * Not sure if it's worth it. */
+        if (g < last_g) return false;
+        last_g = g;
+        page->add (g);
+
+        array = (const T *) ((const char *) array + stride);
+        count--;
+      }
+      while (count && (g = *array, g < end));
+    }
+    return true;
+  }
+  template <typename T>
+  bool add_sorted_array (const hb_sorted_array_t<const T>& arr) { return add_sorted_array (&arr, arr.len ()); }
+
+  void del (hb_codepoint_t g)
+  {
+    /* TODO perform op even if !successful. */
+    if (unlikely (!successful)) return;
+    page_t *page = page_for (g);
+    if (!page)
+      return;
+    dirty ();
+    page->del (g);
+  }
+
+  private:
+  void del_pages (int ds, int de)
+  {
+    if (ds <= de)
+    {
+      // Pre-allocate the workspace that compact() will need so we can bail on allocation failure
+      // before attempting to rewrite the page map.
+      hb_vector_t<unsigned> compact_workspace;
+      if (unlikely (!allocate_compact_workspace (compact_workspace))) return;
+
+      unsigned int write_index = 0;
+      for (unsigned int i = 0; i < page_map.length; i++)
+      {
+        int m = (int) page_map[i].major;
+        if (m < ds || de < m)
+          page_map[write_index++] = page_map[i];
+      }
+      compact (compact_workspace, write_index);
+      resize (write_index);
+    }
+  }
+
+
+  public:
+  void del_range (hb_codepoint_t a, hb_codepoint_t b)
+  {
+    /* TODO perform op even if !successful. */
+    if (unlikely (!successful)) return;
+    if (unlikely (a > b || a == INVALID)) return;
+    dirty ();
+    unsigned int ma = get_major (a);
+    unsigned int mb = get_major (b);
+    /* Delete pages from ds through de if ds <= de. */
+    int ds = (a == major_start (ma))? (int) ma: (int) (ma + 1);
+    int de = (b + 1 == major_start (mb + 1))? (int) mb: ((int) mb - 1);
+    if (ds > de || (int) ma < ds)
+    {
+      page_t *page = page_for (a);
+      if (page)
+      {
+        if (ma == mb)
+          page->del_range (a, b);
+        else
+          page->del_range (a, major_start (ma + 1) - 1);
+      }
+    }
+    if (de < (int) mb && ma != mb)
+    {
+      page_t *page = page_for (b);
+      if (page)
+        page->del_range (major_start (mb), b);
+    }
+    del_pages (ds, de);
+  }
+
+  bool get (hb_codepoint_t g) const
+  {
+    const page_t *page = page_for (g);
+    if (!page)
+      return false;
+    return page->get (g);
+  }
+
+  /* Has interface. */
+  static constexpr bool SENTINEL = false;
+  typedef bool value_t;
+  value_t operator [] (hb_codepoint_t k) const { return get (k); }
+  bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
+  /* Predicate. */
+  bool operator () (hb_codepoint_t k) const { return has (k); }
+
+  /* Sink interface.
+   */
+  hb_bit_set_t& operator << (hb_codepoint_t v)
+  { add (v); return *this; }
+  hb_bit_set_t& operator << (const hb_pair_t<hb_codepoint_t, hb_codepoint_t>& range)
+  { add_range (range.first, range.second); return *this; }
+
+  bool intersects (hb_codepoint_t first, hb_codepoint_t last) const
+  {
+    hb_codepoint_t c = first - 1;
+    return next (&c) && c <= last;
+  }
+  void set (const hb_bit_set_t &other)
+  {
+    if (unlikely (!successful)) return;
+    unsigned int count = other.pages.length;
+    if (!resize (count))
+      return;
+    population = other.population;
+
+    hb_memcpy ((void *) pages, (const void *) other.pages, count * pages.item_size);
+    hb_memcpy ((void *) page_map, (const void *) other.page_map, count * page_map.item_size);
+  }
+
+  bool is_equal (const hb_bit_set_t &other) const
+  {
+    if (get_population () != other.get_population ())
+      return false;
+
+    unsigned int na = pages.length;
+    unsigned int nb = other.pages.length;
+
+    unsigned int a = 0, b = 0;
+    for (; a < na && b < nb; )
+    {
+      if (page_at (a).is_empty ()) { a++; continue; }
+      if (other.page_at (b).is_empty ()) { b++; continue; }
+      if (page_map[a].major != other.page_map[b].major ||
+          !page_at (a).is_equal (other.page_at (b)))
+        return false;
+      a++;
+      b++;
+    }
+    for (; a < na; a++)
+      if (!page_at (a).is_empty ()) { return false; }
+    for (; b < nb; b++)
+      if (!other.page_at (b).is_empty ()) { return false; }
+
+    return true;
+  }
+
+  bool is_subset (const hb_bit_set_t &larger_set) const
+  {
+    /* TODO: Merge this and is_equal() into something like process(). */
+    if (unlikely (larger_set.is_empty ()))
+      return is_empty ();
+
+    uint32_t spi = 0;
+    for (uint32_t lpi = 0; spi < page_map.length && lpi < larger_set.page_map.length; lpi++)
+    {
+      uint32_t spm = page_map[spi].major;
+      uint32_t lpm = larger_set.page_map[lpi].major;
+      auto sp = page_at (spi);
+      auto lp = larger_set.page_at (lpi);
+
+      if (spm < lpm && !sp.is_empty ())
+        return false;
+
+      if (lpm < spm)
+        continue;
+
+      if (!sp.is_subset (lp))
+        return false;
+
+      spi++;
+    }
+
+    while (spi < page_map.length)
+      if (!page_at (spi++).is_empty ())
+        return false;
+
+    return true;
+  }
+
+  private:
+  bool allocate_compact_workspace (hb_vector_t<unsigned>& workspace)
+  {
+    if (unlikely (!workspace.resize (pages.length)))
+    {
+      successful = false;
+      return false;
+    }
+
+    return true;
+  }
+
+  /*
+   * workspace should be a pre-sized vector allocated to hold at exactly pages.length
+   * elements.
+   */
+  void compact (hb_vector_t<unsigned>& workspace,
+                unsigned int length)
+  {
+    assert (workspace.length == pages.length);
+    hb_vector_t<unsigned>& old_index_to_page_map_index = workspace;
+
+    hb_fill (old_index_to_page_map_index.writer (), 0xFFFFFFFF);
+    /* TODO(iter) Rewrite as dagger?
+     */
+    for (unsigned i = 0; i < length; i++)
+      old_index_to_page_map_index[page_map[i].index] = i;
+
+    compact_pages (old_index_to_page_map_index);
+  }
+  void compact_pages (const hb_vector_t<unsigned>& old_index_to_page_map_index)
+  {
+    unsigned int write_index = 0;
+    for (unsigned int i = 0; i < pages.length; i++)
+    {
+      if (old_index_to_page_map_index[i] == 0xFFFFFFFF) continue;
+
+      if (write_index < i)
+        pages[write_index] = pages[i];
+
+      page_map[old_index_to_page_map_index[i]].index = write_index;
+      write_index++;
+    }
+  }
+  public:
+
+  template <typename Op>
+  void process (const Op& op, const hb_bit_set_t &other)
+  {
+    const bool passthru_left = op (1, 0);
+    const bool passthru_right = op (0, 1);
+
+    if (unlikely (!successful)) return;
+
+    dirty ();
+
+    unsigned int na = pages.length;
+    unsigned int nb = other.pages.length;
+    unsigned int next_page = na;
+
+    unsigned int count = 0, newCount = 0;
+    unsigned int a = 0, b = 0;
+    unsigned int write_index = 0;
+
+    // Pre-allocate the workspace that compact() will need so we can bail on allocation failure
+    // before attempting to rewrite the page map.
+    hb_vector_t<unsigned> compact_workspace;
+    if (!passthru_left && unlikely (!allocate_compact_workspace (compact_workspace))) return;
+
+    for (; a < na && b < nb; )
+    {
+      if (page_map[a].major == other.page_map[b].major)
+      {
+        if (!passthru_left)
+        {
+          // Move page_map entries that we're keeping from the left side set
+          // to the front of the page_map vector. This isn't necessary if
+          // passthru_left is set since no left side pages will be removed
+          // in that case.
+          if (write_index < a)
+            page_map[write_index] = page_map[a];
+          write_index++;
+        }
+
+        count++;
+        a++;
+        b++;
+      }
+      else if (page_map[a].major < other.page_map[b].major)
+      {
+        if (passthru_left)
+          count++;
+        a++;
+      }
+      else
+      {
+        if (passthru_right)
+          count++;
+        b++;
+      }
+    }
+    if (passthru_left)
+      count += na - a;
+    if (passthru_right)
+      count += nb - b;
+
+    if (!passthru_left)
+    {
+      na = write_index;
+      next_page = write_index;
+      compact (compact_workspace, write_index);
+    }
+
+    if (!resize (count))
+      return;
+
+    newCount = count;
+
+    /* Process in-place backward. */
+    a = na;
+    b = nb;
+    for (; a && b; )
+    {
+      if (page_map[a - 1].major == other.page_map[b - 1].major)
+      {
+        a--;
+        b--;
+        count--;
+        page_map[count] = page_map[a];
+        page_at (count).v = op (page_at (a).v, other.page_at (b).v);
+      }
+      else if (page_map[a - 1].major > other.page_map[b - 1].major)
+      {
+        a--;
+        if (passthru_left)
+        {
+          count--;
+          page_map[count] = page_map[a];
+        }
+      }
+      else
+      {
+        b--;
+        if (passthru_right)
+        {
+          count--;
+          page_map[count].major = other.page_map[b].major;
+          page_map[count].index = next_page++;
+          page_at (count).v = other.page_at (b).v;
+        }
+      }
+    }
+    if (passthru_left)
+      while (a)
+      {
+        a--;
+        count--;
+        page_map[count] = page_map [a];
+      }
+    if (passthru_right)
+      while (b)
+      {
+        b--;
+        count--;
+        page_map[count].major = other.page_map[b].major;
+        page_map[count].index = next_page++;
+        page_at (count).v = other.page_at (b).v;
+      }
+    assert (!count);
+    if (pages.length > newCount)
+      // This resize() doesn't need to be checked because we can't get here
+      // if the set is currently in_error() and this only resizes downwards
+      // which will always succeed if the set is not in_error().
+      resize (newCount);
+  }
+
+  bool next (hb_codepoint_t *codepoint) const
+  {
+    // TODO: this should be merged with prev() as both implementations
+    // are very similar.
+    if (unlikely (*codepoint == INVALID)) {
+      *codepoint = get_min ();
+      return *codepoint != INVALID;
+    }
+
+    const auto* page_map_array = page_map.arrayZ;
+    unsigned int major = get_major (*codepoint);
+    unsigned int i = last_page_lookup;
+
+    if (unlikely (i >= page_map.length || page_map_array[i].major != major))
+    {
+      page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST);
+      if (i >= page_map.length) {
+        *codepoint = INVALID;
+        return false;
+      }
+    }
+
+    const auto* pages_array = pages.arrayZ;
+    const page_map_t &current = page_map_array[i];
+    if (likely (current.major == major))
+    {
+      if (pages_array[current.index].next (codepoint))
+      {
+        *codepoint += current.major * page_t::PAGE_BITS;
+        last_page_lookup = i;
+        return true;
+      }
+      i++;
+    }
+
+    for (; i < page_map.length; i++)
+    {
+      const page_map_t &current = page_map.arrayZ[i];
+      hb_codepoint_t m = pages_array[current.index].get_min ();
+      if (m != INVALID)
+      {
+        *codepoint = current.major * page_t::PAGE_BITS + m;
+        last_page_lookup = i;
+        return true;
+      }
+    }
+    last_page_lookup = 0;
+    *codepoint = INVALID;
+    return false;
+  }
+  bool previous (hb_codepoint_t *codepoint) const
+  {
+    if (unlikely (*codepoint == INVALID)) {
+      *codepoint = get_max ();
+      return *codepoint != INVALID;
+    }
+
+    page_map_t map = {get_major (*codepoint), 0};
+    unsigned int i;
+    page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST);
+    if (i < page_map.length && page_map[i].major == map.major)
+    {
+      if (pages[page_map[i].index].previous (codepoint))
+      {
+        *codepoint += page_map[i].major * page_t::PAGE_BITS;
+        return true;
+      }
+    }
+    i--;
+    for (; (int) i >= 0; i--)
+    {
+      hb_codepoint_t m = pages[page_map[i].index].get_max ();
+      if (m != INVALID)
+      {
+        *codepoint = page_map[i].major * page_t::PAGE_BITS + m;
+        return true;
+      }
+    }
+    *codepoint = INVALID;
+    return false;
+  }
+  bool next_range (hb_codepoint_t *first, hb_codepoint_t *last) const
+  {
+    hb_codepoint_t i;
+
+    i = *last;
+    if (!next (&i))
+    {
+      *last = *first = INVALID;
+      return false;
+    }
+
+    /* TODO Speed up. */
+    *last = *first = i;
+    while (next (&i) && i == *last + 1)
+      (*last)++;
+
+    return true;
+  }
+  bool previous_range (hb_codepoint_t *first, hb_codepoint_t *last) const
+  {
+    hb_codepoint_t i;
+
+    i = *first;
+    if (!previous (&i))
+    {
+      *last = *first = INVALID;
+      return false;
+    }
+
+    /* TODO Speed up. */
+    *last = *first = i;
+    while (previous (&i) && i == *first - 1)
+      (*first)--;
+
+    return true;
+  }
+
+  unsigned int get_population () const
+  {
+    if (population != UINT_MAX)
+      return population;
+
+    unsigned int pop = 0;
+    unsigned int count = pages.length;
+    for (unsigned int i = 0; i < count; i++)
+      pop += pages[i].get_population ();
+
+    population = pop;
+    return pop;
+  }
+  hb_codepoint_t get_min () const
+  {
+    unsigned int count = pages.length;
+    for (unsigned int i = 0; i < count; i++)
+      if (!page_at (i).is_empty ())
+        return page_map[i].major * page_t::PAGE_BITS + page_at (i).get_min ();
+    return INVALID;
+  }
+  hb_codepoint_t get_max () const
+  {
+    unsigned int count = pages.length;
+    for (int i = count - 1; i >= 0; i--)
+      if (!page_at (i).is_empty ())
+        return page_map[(unsigned) i].major * page_t::PAGE_BITS + page_at (i).get_max ();
+    return INVALID;
+  }
+
+  static constexpr hb_codepoint_t INVALID = page_t::INVALID;
+
+  /*
+   * Iterator implementation.
+   */
+  struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
+  {
+    static constexpr bool is_sorted_iterator = true;
+    iter_t (const hb_bit_set_t &s_ = Null (hb_bit_set_t),
+            bool init = true) : s (&s_), v (INVALID), l(0)
+    {
+      if (init)
+      {
+        l = s->get_population () + 1;
+        __next__ ();
+      }
+    }
+
+    typedef hb_codepoint_t __item_t__;
+    hb_codepoint_t __item__ () const { return v; }
+    bool __more__ () const { return v != INVALID; }
+    void __next__ () { s->next (&v); if (l) l--; }
+    void __prev__ () { s->previous (&v); }
+    unsigned __len__ () const { return l; }
+    iter_t end () const { return iter_t (*s, false); }
+    bool operator != (const iter_t& o) const
+    { return s != o.s || v != o.v; }
+
+    protected:
+    const hb_bit_set_t *s;
+    hb_codepoint_t v;
+    unsigned l;
+  };
+  iter_t iter () const { return iter_t (*this); }
+  operator iter_t () const { return iter (); }
+
+  protected:
+
+  page_t *page_for_insert (hb_codepoint_t g)
+  {
+    page_map_t map = {get_major (g), pages.length};
+    unsigned int i;
+    if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST))
+    {
+      if (!resize (pages.length + 1))
+        return nullptr;
+
+      pages[map.index].init0 ();
+      memmove (page_map + i + 1,
+               page_map + i,
+               (page_map.length - 1 - i) * page_map.item_size);
+      page_map[i] = map;
+    }
+    return &pages[page_map[i].index];
+  }
+  page_t *page_for (hb_codepoint_t g)
+  {
+    page_map_t key = {get_major (g)};
+    const page_map_t *found = page_map.bsearch (key);
+    if (found)
+      return &pages[found->index];
+    return nullptr;
+  }
+  const page_t *page_for (hb_codepoint_t g) const
+  {
+    page_map_t key = {get_major (g)};
+    const page_map_t *found = page_map.bsearch (key);
+    if (found)
+      return &pages[found->index];
+    return nullptr;
+  }
+  page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
+  const page_t &page_at (unsigned int i) const { return pages[page_map[i].index]; }
+  unsigned int get_major (hb_codepoint_t g) const { return g / page_t::PAGE_BITS; }
+  hb_codepoint_t major_start (unsigned int major) const { return major * page_t::PAGE_BITS; }
+};
+
+
+#endif /* HB_BIT_SET_HH */
diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
index f1c111a4a..a68abb533 100644
--- a/src/hb-ot-layout-gsubgpos.hh
+++ b/src/hb-ot-layout-gsubgpos.hh
@@ -3719,8 +3719,9 @@ struct GSUBGPOS
     hb_set_t alternate_feature_indices;
     if (version.to_int () >= 0x00010001u)
       (this+featureVars).closure_features (lookup_indices, &alternate_feature_indices);
-    if (unlikely (alternate_feature_indices.in_error())) {
-      feature_indices->successful = false;
+    if (unlikely (alternate_feature_indices.in_error()))
+    {
+      feature_indices->err ();
       return;
     }
 #endif
diff --git a/src/hb-set.cc b/src/hb-set.cc
index 25f29c67b..17bbdc055 100644
--- a/src/hb-set.cc
+++ b/src/hb-set.cc
@@ -169,7 +169,7 @@ hb_set_get_user_data (hb_set_t *set,
 hb_bool_t
 hb_set_allocation_successful (const hb_set_t *set)
 {
-  return set->successful;
+  return !set->in_error ();
 }
 
 /**
diff --git a/src/hb-set.hh b/src/hb-set.hh
index d47b3e8ca..5c70eeedc 100644
--- a/src/hb-set.hh
+++ b/src/hb-set.hh
@@ -29,47 +29,17 @@
 #define HB_SET_HH
 
 #include "hb.hh"
-#include "hb-bit-page.hh"
-#include "hb-machinery.hh"
+#include "hb-bit-set.hh"
 
 
 struct hb_set_t
 {
-  hb_set_t () { init (); }
-  ~hb_set_t () { fini (); }
-
-  hb_set_t (const hb_set_t& other) : hb_set_t () { set (other); }
-  void operator= (const hb_set_t& other) { set (other); }
-  // TODO Add move construtor/assign
-  // TODO Add constructor for Iterator; with specialization for (sorted) vector / array?
- - /* TODO Keep a freelist so we can release pages that are completely zeroed. At that - * point maybe also use a sentinel value for "all-1" pages? */ - - using page_t = hb_bit_page_t; - struct page_map_t - { - int cmp (const page_map_t &o) const { return cmp (o.major); } - int cmp (uint32_t o_major) const { return (int) o_major - (int) major; } - - uint32_t major; - uint32_t index; - }; - hb_object_header_t header; - bool successful; /* Allocations successful */ - mutable unsigned int population; - mutable unsigned int last_page_lookup; - hb_sorted_vector_t page_map; - hb_vector_t pages; + hb_bit_set_t s; void init_shallow () { - successful = true; - population = 0; - last_page_lookup = 0; - page_map.init (); - pages.init (); + s.init (); } void init () { @@ -78,10 +48,7 @@ struct hb_set_t } void fini_shallow () { - population = 0; - last_page_lookup = 0; - page_map.fini (); - pages.fini (); + s.fini (); } void fini () { @@ -89,103 +56,23 @@ struct hb_set_t fini_shallow (); } - bool in_error () const { return !successful; } - - bool resize (unsigned int count) - { - if (unlikely (count > pages.length && !successful)) return false; - if (!pages.resize (count) || !page_map.resize (count)) - { - pages.resize (page_map.length); - successful = false; - return false; - } - return true; - } - - void reset () - { - successful = true; - clear (); - } - - void clear () - { - if (resize (0)) - population = 0; - } - bool is_empty () const - { - unsigned int count = pages.length; - for (unsigned int i = 0; i < count; i++) - if (!pages[i].is_empty ()) - return false; - return true; - } explicit operator bool () const { return !is_empty (); } - void dirty () { population = UINT_MAX; } + void err () { s.err (); } + bool in_error () const { return s.in_error (); } - void add (hb_codepoint_t g) - { - if (unlikely (!successful)) return; - if (unlikely (g == INVALID)) return; - dirty (); - page_t *page = page_for_insert (g); if (unlikely (!page)) return; - page->add (g); - } - bool add_range (hb_codepoint_t a, hb_codepoint_t b) - { - if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */ - if (unlikely (a > b || a == INVALID || b == INVALID)) return false; - dirty (); - unsigned int ma = get_major (a); - unsigned int mb = get_major (b); - if (ma == mb) - { - page_t *page = page_for_insert (a); if (unlikely (!page)) return false; - page->add_range (a, b); - } - else - { - page_t *page = page_for_insert (a); if (unlikely (!page)) return false; - page->add_range (a, major_start (ma + 1) - 1); + void reset () { s.reset (); } - for (unsigned int m = ma + 1; m < mb; m++) - { - page = page_for_insert (major_start (m)); if (unlikely (!page)) return false; - page->init1 (); - } + void clear () { s.clear (); } - page = page_for_insert (b); if (unlikely (!page)) return false; - page->add_range (major_start (mb), b); - } - return true; - } + bool is_empty () const { return s.is_empty (); } + + void add (hb_codepoint_t g) { s.add (g); } + bool add_range (hb_codepoint_t a, hb_codepoint_t b) { return s.add_range (a, b); } template void add_array (const T *array, unsigned int count, unsigned int stride=sizeof(T)) - { - if (unlikely (!successful)) return; - if (!count) return; - dirty (); - hb_codepoint_t g = *array; - while (count) - { - unsigned int m = get_major (g); - page_t *page = page_for_insert (g); if (unlikely (!page)) return; - unsigned int start = major_start (m); - unsigned int end = major_start (m + 1); - do - { - page->add (g); - - array = &StructAtOffsetUnaligned 
(array, stride); - count--; - } - while (count && (g = *array, start <= g && g < end)); - } - } + { s.add_array (array, count, stride); } template void add_array (const hb_array_t& arr) { add_array (&arr, arr.len ()); } @@ -193,108 +80,14 @@ struct hb_set_t * Used for faster rejection of corrupt data. */ template bool add_sorted_array (const T *array, unsigned int count, unsigned int stride=sizeof(T)) - { - if (unlikely (!successful)) return true; /* https://github.com/harfbuzz/harfbuzz/issues/657 */ - if (!count) return true; - dirty (); - hb_codepoint_t g = *array; - hb_codepoint_t last_g = g; - while (count) - { - unsigned int m = get_major (g); - page_t *page = page_for_insert (g); if (unlikely (!page)) return false; - unsigned int end = major_start (m + 1); - do - { - /* If we try harder we can change the following comparison to <=; - * Not sure if it's worth it. */ - if (g < last_g) return false; - last_g = g; - page->add (g); - - array = (const T *) ((const char *) array + stride); - count--; - } - while (count && (g = *array, g < end)); - } - return true; - } + { return s.add_sorted_array (array, count, stride); } template bool add_sorted_array (const hb_sorted_array_t& arr) { return add_sorted_array (&arr, arr.len ()); } - void del (hb_codepoint_t g) - { - /* TODO perform op even if !successful. */ - if (unlikely (!successful)) return; - page_t *page = page_for (g); - if (!page) - return; - dirty (); - page->del (g); - } + void del (hb_codepoint_t g) { s.del (g); } + void del_range (hb_codepoint_t a, hb_codepoint_t b) { s.del_range (a, b); } - private: - void del_pages (int ds, int de) - { - if (ds <= de) - { - // Pre-allocate the workspace that compact() will need so we can bail on allocation failure - // before attempting to rewrite the page map. - hb_vector_t compact_workspace; - if (unlikely (!allocate_compact_workspace (compact_workspace))) return; - - unsigned int write_index = 0; - for (unsigned int i = 0; i < page_map.length; i++) - { - int m = (int) page_map[i].major; - if (m < ds || de < m) - page_map[write_index++] = page_map[i]; - } - compact (compact_workspace, write_index); - resize (write_index); - } - } - - - public: - void del_range (hb_codepoint_t a, hb_codepoint_t b) - { - /* TODO perform op even if !successful. */ - if (unlikely (!successful)) return; - if (unlikely (a > b || a == INVALID)) return; - dirty (); - unsigned int ma = get_major (a); - unsigned int mb = get_major (b); - /* Delete pages from ds through de if ds <= de. */ - int ds = (a == major_start (ma))? (int) ma: (int) (ma + 1); - int de = (b + 1 == major_start (mb + 1))? (int) mb: ((int) mb - 1); - if (ds > de || (int) ma < ds) - { - page_t *page = page_for (a); - if (page) - { - if (ma == mb) - page->del_range (a, b); - else - page->del_range (a, major_start (ma + 1) - 1); - } - } - if (de < (int) mb && ma != mb) - { - page_t *page = page_for (b); - if (page) - page->del_range (major_start (mb), b); - } - del_pages (ds, de); - } - - bool get (hb_codepoint_t g) const - { - const page_t *page = page_for (g); - if (!page) - return false; - return page->get (g); - } + bool get (hb_codepoint_t g) const { return s.get (g); } /* Has interface. 
*/ static constexpr bool SENTINEL = false; @@ -311,497 +104,39 @@ struct hb_set_t { add_range (range.first, range.second); return *this; } bool intersects (hb_codepoint_t first, hb_codepoint_t last) const - { - hb_codepoint_t c = first - 1; - return next (&c) && c <= last; - } - void set (const hb_set_t &other) - { - if (unlikely (!successful)) return; - unsigned int count = other.pages.length; - if (!resize (count)) - return; - population = other.population; + { return s.intersects (first, last); } - hb_memcpy ((void *) pages, (const void *) other.pages, count * pages.item_size); - hb_memcpy ((void *) page_map, (const void *) other.page_map, count * page_map.item_size); - } + void set (const hb_set_t &other) { s.set (other.s); } - bool is_equal (const hb_set_t &other) const - { - if (get_population () != other.get_population ()) - return false; + bool is_equal (const hb_set_t &other) const { return s.is_equal (other.s); } - unsigned int na = pages.length; - unsigned int nb = other.pages.length; + bool is_subset (const hb_set_t &larger_set) const { return s.is_subset (larger_set.s); } - unsigned int a = 0, b = 0; - for (; a < na && b < nb; ) - { - if (page_at (a).is_empty ()) { a++; continue; } - if (other.page_at (b).is_empty ()) { b++; continue; } - if (page_map[a].major != other.page_map[b].major || - !page_at (a).is_equal (other.page_at (b))) - return false; - a++; - b++; - } - for (; a < na; a++) - if (!page_at (a).is_empty ()) { return false; } - for (; b < nb; b++) - if (!other.page_at (b).is_empty ()) { return false; } + void union_ (const hb_set_t &other) { s.process (hb_bitwise_or, other.s); } + void intersect (const hb_set_t &other) { s.process (hb_bitwise_and, other.s); } + void subtract (const hb_set_t &other) { s.process (hb_bitwise_sub, other.s); } + void symmetric_difference (const hb_set_t &other) { s.process (hb_bitwise_xor, other.s); } - return true; - } - - bool is_subset (const hb_set_t &larger_set) const - { - /* TODO: Merge this and is_equal() into something like process(). */ - if (unlikely(larger_set.is_empty ())) - return is_empty (); - - uint32_t spi = 0; - for (uint32_t lpi = 0; spi < page_map.length && lpi < larger_set.page_map.length; lpi++) - { - uint32_t spm = page_map[spi].major; - uint32_t lpm = larger_set.page_map[lpi].major; - auto sp = page_at (spi); - auto lp = larger_set.page_at (lpi); - - if (spm < lpm && !sp.is_empty ()) - return false; - - if (lpm < spm) - continue; - - if (!sp.is_subset (lp)) - return false; - - spi++; - } - - while (spi < page_map.length) - if (!page_at (spi++).is_empty ()) - return false; - - return true; - } - - bool allocate_compact_workspace (hb_vector_t& workspace) - { - if (unlikely (!workspace.resize (pages.length))) - { - successful = false; - return false; - } - - return true; - } - - - /* - * workspace should be a pre-sized vector allocated to hold at exactly pages.length - * elements. - */ - void compact (hb_vector_t& workspace, - unsigned int length) - { - assert(workspace.length == pages.length); - hb_vector_t& old_index_to_page_map_index = workspace; - - hb_fill (old_index_to_page_map_index.writer(), 0xFFFFFFFF); - /* TODO(iter) Rewrite as dagger? 
*/ - for (unsigned i = 0; i < length; i++) - old_index_to_page_map_index[page_map[i].index] = i; - - compact_pages (old_index_to_page_map_index); - } - - void compact_pages (const hb_vector_t& old_index_to_page_map_index) - { - unsigned int write_index = 0; - for (unsigned int i = 0; i < pages.length; i++) - { - if (old_index_to_page_map_index[i] == 0xFFFFFFFF) continue; - - if (write_index < i) - pages[write_index] = pages[i]; - - page_map[old_index_to_page_map_index[i]].index = write_index; - write_index++; - } - } - - template - void process (const Op& op, const hb_set_t &other) - { - const bool passthru_left = op (1, 0); - const bool passthru_right = op (0, 1); - - if (unlikely (!successful)) return; - - dirty (); - - unsigned int na = pages.length; - unsigned int nb = other.pages.length; - unsigned int next_page = na; - - unsigned int count = 0, newCount = 0; - unsigned int a = 0, b = 0; - unsigned int write_index = 0; - - // Pre-allocate the workspace that compact() will need so we can bail on allocation failure - // before attempting to rewrite the page map. - hb_vector_t compact_workspace; - if (!passthru_left && unlikely (!allocate_compact_workspace (compact_workspace))) return; - - for (; a < na && b < nb; ) - { - if (page_map[a].major == other.page_map[b].major) - { - if (!passthru_left) - { - // Move page_map entries that we're keeping from the left side set - // to the front of the page_map vector. This isn't necessary if - // passthru_left is set since no left side pages will be removed - // in that case. - if (write_index < a) - page_map[write_index] = page_map[a]; - write_index++; - } - - count++; - a++; - b++; - } - else if (page_map[a].major < other.page_map[b].major) - { - if (passthru_left) - count++; - a++; - } - else - { - if (passthru_right) - count++; - b++; - } - } - if (passthru_left) - count += na - a; - if (passthru_right) - count += nb - b; - - if (!passthru_left) - { - na = write_index; - next_page = write_index; - compact (compact_workspace, write_index); - } - - if (!resize (count)) - return; - - newCount = count; - - /* Process in-place backward. */ - a = na; - b = nb; - for (; a && b; ) - { - if (page_map[a - 1].major == other.page_map[b - 1].major) - { - a--; - b--; - count--; - page_map[count] = page_map[a]; - page_at (count).v = op (page_at (a).v, other.page_at (b).v); - } - else if (page_map[a - 1].major > other.page_map[b - 1].major) - { - a--; - if (passthru_left) - { - count--; - page_map[count] = page_map[a]; - } - } - else - { - b--; - if (passthru_right) - { - count--; - page_map[count].major = other.page_map[b].major; - page_map[count].index = next_page++; - page_at (count).v = other.page_at (b).v; - } - } - } - if (passthru_left) - while (a) - { - a--; - count--; - page_map[count] = page_map [a]; - } - if (passthru_right) - while (b) - { - b--; - count--; - page_map[count].major = other.page_map[b].major; - page_map[count].index = next_page++; - page_at (count).v = other.page_at (b).v; - } - assert (!count); - if (pages.length > newCount) - // This resize() doesn't need to be checked because we can't get here - // if the set is currently in_error() and this only resizes downwards - // which will always succeed if the set is not in_error(). 
- resize (newCount); - } - - void union_ (const hb_set_t &other) - { - process (hb_bitwise_or, other); - } - void intersect (const hb_set_t &other) - { - process (hb_bitwise_and, other); - } - void subtract (const hb_set_t &other) - { - process (hb_bitwise_sub, other); - } - void symmetric_difference (const hb_set_t &other) - { - process (hb_bitwise_xor, other); - } - bool next (hb_codepoint_t *codepoint) const - { - // TODO: this should be merged with prev() as both implementations - // are very similar. - if (unlikely (*codepoint == INVALID)) { - *codepoint = get_min (); - return *codepoint != INVALID; - } - - const auto* page_map_array = page_map.arrayZ; - unsigned int major = get_major (*codepoint); - unsigned int i = last_page_lookup; - - if (unlikely (i >= page_map.length || page_map_array[i].major != major)) - { - page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST); - if (i >= page_map.length) { - *codepoint = INVALID; - return false; - } - } - - const auto* pages_array = pages.arrayZ; - const page_map_t ¤t = page_map_array[i]; - if (likely (current.major == major)) - { - if (pages_array[current.index].next (codepoint)) - { - *codepoint += current.major * page_t::PAGE_BITS; - last_page_lookup = i; - return true; - } - i++; - } - - for (; i < page_map.length; i++) - { - const page_map_t ¤t = page_map.arrayZ[i]; - hb_codepoint_t m = pages_array[current.index].get_min (); - if (m != INVALID) - { - *codepoint = current.major * page_t::PAGE_BITS + m; - last_page_lookup = i; - return true; - } - } - last_page_lookup = 0; - *codepoint = INVALID; - return false; - } - bool previous (hb_codepoint_t *codepoint) const - { - if (unlikely (*codepoint == INVALID)) { - *codepoint = get_max (); - return *codepoint != INVALID; - } - - page_map_t map = {get_major (*codepoint), 0}; - unsigned int i; - page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST); - if (i < page_map.length && page_map[i].major == map.major) - { - if (pages[page_map[i].index].previous (codepoint)) - { - *codepoint += page_map[i].major * page_t::PAGE_BITS; - return true; - } - } - i--; - for (; (int) i >= 0; i--) - { - hb_codepoint_t m = pages[page_map[i].index].get_max (); - if (m != INVALID) - { - *codepoint = page_map[i].major * page_t::PAGE_BITS + m; - return true; - } - } - *codepoint = INVALID; - return false; - } + bool next (hb_codepoint_t *codepoint) const { return s.next (codepoint); } + bool previous (hb_codepoint_t *codepoint) const { return s.previous (codepoint); } bool next_range (hb_codepoint_t *first, hb_codepoint_t *last) const - { - hb_codepoint_t i; - - i = *last; - if (!next (&i)) - { - *last = *first = INVALID; - return false; - } - - /* TODO Speed up. */ - *last = *first = i; - while (next (&i) && i == *last + 1) - (*last)++; - - return true; - } + { return s.next_range (first, last); } bool previous_range (hb_codepoint_t *first, hb_codepoint_t *last) const - { - hb_codepoint_t i; + { return s.previous_range (first, last); } - i = *first; - if (!previous (&i)) - { - *last = *first = INVALID; - return false; - } + unsigned int get_population () const { return s.get_population (); } + hb_codepoint_t get_min () const { return s.get_min (); } + hb_codepoint_t get_max () const { return s.get_max (); } - /* TODO Speed up. 
*/ - *last = *first = i; - while (previous (&i) && i == *first - 1) - (*first)--; - - return true; - } - - unsigned int get_population () const - { - if (population != UINT_MAX) - return population; - - unsigned int pop = 0; - unsigned int count = pages.length; - for (unsigned int i = 0; i < count; i++) - pop += pages[i].get_population (); - - population = pop; - return pop; - } - hb_codepoint_t get_min () const - { - unsigned int count = pages.length; - for (unsigned int i = 0; i < count; i++) - if (!page_at (i).is_empty ()) - return page_map[i].major * page_t::PAGE_BITS + page_at (i).get_min (); - return INVALID; - } - hb_codepoint_t get_max () const - { - unsigned int count = pages.length; - for (int i = count - 1; i >= 0; i--) - if (!page_at (i).is_empty ()) - return page_map[(unsigned) i].major * page_t::PAGE_BITS + page_at (i).get_max (); - return INVALID; - } - - static constexpr hb_codepoint_t INVALID = HB_SET_VALUE_INVALID; - static_assert (INVALID == page_t::INVALID, ""); + static constexpr hb_codepoint_t INVALID = hb_bit_set_t::INVALID; + static_assert (INVALID == HB_SET_VALUE_INVALID, ""); /* * Iterator implementation. */ - struct iter_t : hb_iter_with_fallback_t - { - static constexpr bool is_sorted_iterator = true; - iter_t (const hb_set_t &s_ = Null (hb_set_t), - bool init = true) : s (&s_), v (INVALID), l(0) - { - if (init) - { - l = s->get_population () + 1; - __next__ (); - } - } - - typedef hb_codepoint_t __item_t__; - hb_codepoint_t __item__ () const { return v; } - bool __more__ () const { return v != INVALID; } - void __next__ () { s->next (&v); if (l) l--; } - void __prev__ () { s->previous (&v); } - unsigned __len__ () const { return l; } - iter_t end () const { return iter_t (*s, false); } - bool operator != (const iter_t& o) const - { return s != o.s || v != o.v; } - - protected: - const hb_set_t *s; - hb_codepoint_t v; - unsigned l; - }; - iter_t iter () const { return iter_t (*this); } + using iter_t = hb_bit_set_t::iter_t; + iter_t iter () const { return iter_t (this->s); } operator iter_t () const { return iter (); } - - protected: - - page_t *page_for_insert (hb_codepoint_t g) - { - page_map_t map = {get_major (g), pages.length}; - unsigned int i; - if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST)) - { - if (!resize (pages.length + 1)) - return nullptr; - - pages[map.index].init0 (); - memmove (page_map + i + 1, - page_map + i, - (page_map.length - 1 - i) * page_map.item_size); - page_map[i] = map; - } - return &pages[page_map[i].index]; - } - page_t *page_for (hb_codepoint_t g) - { - page_map_t key = {get_major (g)}; - const page_map_t *found = page_map.bsearch (key); - if (found) - return &pages[found->index]; - return nullptr; - } - const page_t *page_for (hb_codepoint_t g) const - { - page_map_t key = {get_major (g)}; - const page_map_t *found = page_map.bsearch (key); - if (found) - return &pages[found->index]; - return nullptr; - } - page_t &page_at (unsigned int i) { return pages[page_map[i].index]; } - const page_t &page_at (unsigned int i) const { return pages[page_map[i].index]; } - unsigned int get_major (hb_codepoint_t g) const { return g / page_t::PAGE_BITS; } - hb_codepoint_t major_start (unsigned int major) const { return major * page_t::PAGE_BITS; } };
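
For reviewers who want to see the extracted class in action, here is a minimal usage sketch of the hb_bit_set_t interface introduced by this patch. It is illustrative only and not part of the diff: it assumes compilation inside the HarfBuzz source tree (so "hb.hh" and the new "hb-bit-set.hh" resolve), and main() plus the chosen codepoints are hypothetical test scaffolding. hb_bitwise_or is the same functor that hb_set_t::union_() passes to process() in the diff above.

// Illustrative sketch: exercises the hb_bit_set_t API added by this patch.
// Assumes HarfBuzz-internal headers are on the include path; main() and the
// codepoints below are hypothetical scaffolding, not part of the change.
#include "hb.hh"
#include "hb-bit-set.hh"

#include <cstdio>

int
main ()
{
  hb_bit_set_t a, b;

  a.add (0x41u);               /* 'A' */
  a.add_range (0x61u, 0x7Au);  /* 'a'..'z' */
  b << 0x30u;                  /* sink interface, single codepoint */
  b.add_range (0x31u, 0x39u);  /* '1'..'9' */

  /* Union in place; hb_set_t::union_() delegates to exactly this call. */
  a.process (hb_bitwise_or, b);

  if (a.in_error ())           /* allocation failures latch the error flag */
    return 1;

  /* Forward iteration: INVALID acts as the start/stop sentinel for next(). */
  hb_codepoint_t cp = hb_bit_set_t::INVALID;
  while (a.next (&cp))
    printf ("U+%04X\n", (unsigned) cp);

  printf ("population = %u\n", a.get_population ());
  return 0;
}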