[set] Optimize add_range() some more

It's as good as it gets, and seems to be on par with previous set implementation
in my benchmark.

Would be great if someone can double-check my bitops.
This commit is contained in:
Behdad Esfahbod 2017-12-01 13:56:06 -08:00
parent 438c325a25
commit 9d0194b3a8
1 changed file with 13 additions and 3 deletions

View File

@ -67,9 +67,19 @@ struct hb_set_t
inline void add_range (hb_codepoint_t a, hb_codepoint_t b)
{
  /* Set every bit for codepoints in the inclusive range [a, b].
   * la / lb point at the machine words (elt_t) holding bit a and bit b. */
  elt_t *la = &elt (a);
  elt_t *lb = &elt (b);
  if (la == lb)
  {
    /* Range fits in a single word: (mask(b) << 1) - mask(a) is a run of
     * ones from bit a through bit b.  When b is the top bit of the word,
     * mask(b) << 1 wraps to 0 and unsigned wraparound of the subtraction
     * yields the intended ones from bit a to the top. */
    *la |= (mask (b) << 1) - mask(a);
  }
  else
  {
    /* First word: ones from bit a up to the top of the word. */
    *la |= ~(mask (a) - 1);

    /* Advance past the first word BEFORE the memset; otherwise the memset
     * overwrites *la with 0xff and incorrectly sets bits below a. */
    la++;

    /* Whole words strictly between the first and last: all ones. */
    memset (la, 0xff, (char *) lb - (char *) la);

    /* Last word: ones from bit 0 through bit b (same wraparound note
     * as above when b is the word's top bit). */
    *lb |= ((mask (b) << 1) - 1);
  }
}
inline bool is_equal (const page_t *other) const