From 9d0194b3a8e0c562249337fa0cf4d72e89334263 Mon Sep 17 00:00:00 2001
From: Behdad Esfahbod
Date: Fri, 1 Dec 2017 13:56:06 -0800
Subject: [PATCH] [set] Optimize add_range() some more

It's as good as it gets, and seems to be on par with the previous set
implementation in my benchmark.  Would be great if someone can
double-check my bitops.
---
 src/hb-set-private.hh | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/src/hb-set-private.hh b/src/hb-set-private.hh
index 0fe010ff9..a47b17a31 100644
--- a/src/hb-set-private.hh
+++ b/src/hb-set-private.hh
@@ -67,9 +67,19 @@ struct hb_set_t
     inline void add_range (hb_codepoint_t a, hb_codepoint_t b)
     {
-      /* TODO Speed up. */
-      for (unsigned int i = a; i < b + 1; i++)
-        add (i);
+      elt_t *la = &elt (a);
+      elt_t *lb = &elt (b);
+      if (la == lb)
+        *la |= (mask (b) << 1) - mask (a);
+      else
+      {
+        *la |= ~(mask (a) - 1);
+        la++;
+
+        memset (la, 0xff, (char *) lb - (char *) la);
+
+        *lb |= ((mask (b) << 1) - 1);
+      }
     }
     inline bool is_equal (const page_t *other) const
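
Since the commit message asks for the bitops to be double-checked, here is a
minimal standalone sketch (not part of the patch) that brute-force compares
the new bit manipulation against the naive per-codepoint loop it replaces.
It assumes a 512-bit page of 64-bit words; elt_t, ELT_BITS, N_ELTS, page[],
elt () and mask () below are simplified stand-ins for the corresponding
page_t members in hb-set-private.hh, and double_check_bitops.cc is a
hypothetical file name.

// double_check_bitops.cc -- standalone sanity check, not part of HarfBuzz.
#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint64_t elt_t;                    // assumed width of page_t::elt_t
static const unsigned int ELT_BITS = 64;
static const unsigned int N_ELTS = 8;      // one 512-bit page

static elt_t page[N_ELTS];
static elt_t &elt  (unsigned int g) { return page[(g / ELT_BITS) % N_ELTS]; }
static elt_t  mask (unsigned int g) { return elt_t (1) << (g & (ELT_BITS - 1)); }

/* Mirror of the patched add_range (), for a range within a single page. */
static void add_range (unsigned int a, unsigned int b)
{
  elt_t *la = &elt (a);
  elt_t *lb = &elt (b);
  if (la == lb)
    /* a and b land in the same word: set bits a..b inclusive.
     * (mask (b) << 1) - mask (a) relies on unsigned wraparound when
     * b sits in the top bit. */
    *la |= (mask (b) << 1) - mask (a);
  else
  {
    *la |= ~(mask (a) - 1);                          /* bits a..63 of the first word */
    la++;                                            /* skip it before the memset    */
    memset (la, 0xff, (char *) lb - (char *) la);    /* full words in between        */
    *lb |= (mask (b) << 1) - 1;                      /* bits 0..b of the last word   */
  }
}

int main ()
{
  /* Try every in-page range and compare against the loop the patch removes. */
  for (unsigned int a = 0; a < N_ELTS * ELT_BITS; a++)
    for (unsigned int b = a; b < N_ELTS * ELT_BITS; b++)
    {
      memset (page, 0, sizeof (page));
      add_range (a, b);

      elt_t expected[N_ELTS] = {};
      for (unsigned int i = a; i < b + 1; i++)
        expected[i / ELT_BITS] |= elt_t (1) << (i % ELT_BITS);

      if (memcmp (page, expected, sizeof (page)) != 0)
      {
        printf ("mismatch for range [%u, %u]\n", a, b);
        return 1;
      }
    }
  printf ("all in-page ranges match the naive loop\n");
  return 0;
}

One design note: the wraparound trick is what lets the last-word expression
(mask (b) << 1) - 1 produce an all-ones word when b falls on the top bit,
so no special case is needed for page-aligned range ends.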