[atomic-int] Add operators for relaxed ops

Behdad Esfahbod 2022-08-03 12:54:03 -06:00
parent 86d1e22d4f
commit f73c15ca6c
11 changed files with 41 additions and 38 deletions

@@ -159,6 +159,9 @@ struct hb_atomic_int_t
   hb_atomic_int_t () = default;
   constexpr hb_atomic_int_t (int v) : v (v) {}

+  hb_atomic_int_t& operator = (int v_) { set_relaxed (v_); return *this; }
+  operator int () const { return get_relaxed (); }
+
   void set_relaxed (int v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
   void set_release (int v_) { hb_atomic_int_impl_set (&v, v_); }
   int get_relaxed () const { return hb_atomic_int_impl_get_relaxed (&v); }
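
The two new operators let an hb_atomic_int_t be assigned and read like a plain int while keeping the relaxed memory ordering of set_relaxed () / get_relaxed (); the rest of this commit is the mechanical call-site cleanup that this enables. A minimal sketch of the resulting usage pattern (the variable and function names below are hypothetical, for illustration only):

  static hb_atomic_int_t cached_idx;             /* illustrative only, not part of this commit */

  void remember (int i) { cached_idx = i; }      /* operator =   -> set_relaxed (i) */
  int  recall ()        { return cached_idx; }   /* operator int -> get_relaxed () */

Release stores still have to be requested explicitly through set_release (); the operators only cover the relaxed case.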

@@ -56,7 +56,7 @@ struct hb_bit_set_t
   {
     successful = true;
     population = 0;
-    last_page_lookup.set_relaxed (0);
+    last_page_lookup = 0;
     page_map.init ();
     pages.init ();
   }
@@ -614,7 +614,7 @@ struct hb_bit_set_t
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (*codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
@@ -632,7 +632,7 @@ struct hb_bit_set_t
       if (pages_array[current.index].next (codepoint))
       {
         *codepoint += current.major * page_t::PAGE_BITS;
-        last_page_lookup.set_relaxed (i);
+        last_page_lookup = i;
         return true;
       }
       i++;
@@ -645,11 +645,11 @@ struct hb_bit_set_t
      if (m != INVALID)
      {
        *codepoint = current.major * page_t::PAGE_BITS + m;
-       last_page_lookup.set_relaxed (i);
+       last_page_lookup = i;
        return true;
      }
    }
-    last_page_lookup.set_relaxed (0);
+    last_page_lookup = 0;
    *codepoint = INVALID;
    return false;
  }
@@ -732,7 +732,7 @@ struct hb_bit_set_t
   {
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
       page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -773,7 +773,7 @@ struct hb_bit_set_t
   {
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
       page_map.bfind(major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -900,7 +900,7 @@ struct hb_bit_set_t
     /* The extra page_map length is necessary; can't just rely on vector here,
      * since the next check would be tricked because a null page also has
      * major==0, which we can't distinguish from an actualy major==0 page... */
-    unsigned i = last_page_lookup.get_relaxed ();
+    unsigned i = last_page_lookup;
     if (likely (i < page_map.length))
     {
       auto &cached_page = page_map.arrayZ[i];
@@ -924,7 +924,7 @@ struct hb_bit_set_t
       page_map[i] = map;
     }

-    last_page_lookup.set_relaxed (i);
+    last_page_lookup = i;
     return &pages[page_map[i].index];
   }
   const page_t *page_for (hb_codepoint_t g) const
@@ -934,7 +934,7 @@ struct hb_bit_set_t
     /* The extra page_map length is necessary; can't just rely on vector here,
      * since the next check would be tricked because a null page also has
      * major==0, which we can't distinguish from an actualy major==0 page... */
-    unsigned i = last_page_lookup.get_relaxed ();
+    unsigned i = last_page_lookup;
     if (likely (i < page_map.length))
     {
       auto &cached_page = page_map.arrayZ[i];
@@ -946,7 +946,7 @@ struct hb_bit_set_t
     if (!page_map.bfind (key, &i))
       return nullptr;

-    last_page_lookup.set_relaxed (i);
+    last_page_lookup = i;
     return &pages[page_map[i].index];
   }
   page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
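
last_page_lookup is only a best-effort cache of the most recent page_map index: a stale value just misses and falls back to bfind (), so relaxed loads and stores suffice, which is why plain assignment (now relaxed through the new operators) is safe here. A self-contained sketch of the same pattern using std::atomic, purely for illustration and not HarfBuzz code:

  #include <algorithm>
  #include <atomic>
  #include <vector>

  /* Hypothetical stand-in for the last_page_lookup idea. */
  struct cached_find_t
  {
    std::vector<unsigned> keys;               /* sorted */
    mutable std::atomic<unsigned> last {0};   /* best-effort cache of the last hit */

    bool find (unsigned key, unsigned *pos) const
    {
      unsigned i = last.load (std::memory_order_relaxed);   /* may be stale; that is fine */
      if (i >= keys.size () || keys[i] != key)
      {
        /* Cache miss: fall back to an authoritative binary search. */
        auto it = std::lower_bound (keys.begin (), keys.end (), key);
        if (it == keys.end () || *it != key) return false;
        i = (unsigned) (it - keys.begin ());
      }
      last.store (i, std::memory_order_relaxed);            /* best-effort update */
      *pos = i;
      return true;
    }
  };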

@@ -45,13 +45,13 @@ struct hb_cache_t
   void clear ()
   {
     for (unsigned i = 0; i < ARRAY_LENGTH (values); i++)
-      values[i].set_relaxed (-1);
+      values[i] = -1;
   }

   bool get (unsigned int key, unsigned int *value) const
   {
     unsigned int k = key & ((1u<<cache_bits)-1);
-    unsigned int v = values[k].get_relaxed ();
+    unsigned int v = values[k];
     if ((key_bits + value_bits - cache_bits == 8 * sizeof (hb_atomic_int_t) && v == (unsigned int) -1) ||
         (v >> value_bits) != (key >> cache_bits))
       return false;
@@ -65,7 +65,7 @@ struct hb_cache_t
       return false; /* Overflows */
     unsigned int k = key & ((1u<<cache_bits)-1);
     unsigned int v = ((key>>cache_bits)<<value_bits) | value;
-    values[k].set_relaxed (v);
+    values[k] = v;
     return true;
   }

@@ -99,7 +99,7 @@ _hb_options_init ()
   }

   /* This is idempotent and threadsafe. */
-  _hb_options.set_relaxed (u.i);
+  _hb_options = u.i;
 }

@@ -67,12 +67,12 @@ hb_options ()
 #endif

   /* Make a local copy, so we can access bitfield threadsafely. */
   hb_options_union_t u;
-  u.i = _hb_options.get_relaxed ();
+  u.i = _hb_options;
   if (unlikely (!u.i))
   {
     _hb_options_init ();
-    u.i = _hb_options.get_relaxed ();
+    u.i = _hb_options;
   }

   return u.opts;

@@ -132,7 +132,7 @@ hb_face_create_for_tables (hb_reference_table_func_t reference_table_func,
   face->user_data = user_data;
   face->destroy = destroy;
-  face->num_glyphs.set_relaxed (-1);
+  face->num_glyphs = -1;

   face->data.init0 (face);
   face->table.init0 (face);
@@ -479,7 +479,7 @@ hb_face_set_upem (hb_face_t *face,
   if (hb_object_is_immutable (face))
     return;

-  face->upem.set_relaxed (upem);
+  face->upem = upem;
 }

 /**
@@ -514,7 +514,7 @@ hb_face_set_glyph_count (hb_face_t *face,
   if (hb_object_is_immutable (face))
     return;

-  face->num_glyphs.set_relaxed (glyph_count);
+  face->num_glyphs = glyph_count;
 }

 /**

@@ -83,7 +83,7 @@ struct hb_face_t
   unsigned int get_upem () const
   {
-    unsigned int ret = upem.get_relaxed ();
+    unsigned int ret = upem;
     if (unlikely (!ret))
     {
       return load_upem ();
@@ -93,7 +93,7 @@ struct hb_face_t
   unsigned int get_num_glyphs () const
   {
-    unsigned int ret = num_glyphs.get_relaxed ();
+    unsigned int ret = num_glyphs;
     if (unlikely (ret == UINT_MAX))
       return load_num_glyphs ();
     return ret;

@@ -144,14 +144,14 @@ struct hb_reference_count_t
 {
   mutable hb_atomic_int_t ref_count;

-  void init (int v = 1) { ref_count.set_relaxed (v); }
-  int get_relaxed () const { return ref_count.get_relaxed (); }
+  void init (int v = 1) { ref_count = v; }
+  int get_relaxed () const { return ref_count; }
   int inc () const { return ref_count.inc (); }
   int dec () const { return ref_count.dec (); }
-  void fini () { ref_count.set_relaxed (-0x0000DEAD); }
+  void fini () { ref_count = -0x0000DEAD; }

-  bool is_inert () const { return !ref_count.get_relaxed (); }
-  bool is_valid () const { return ref_count.get_relaxed () > 0; }
+  bool is_inert () const { return !ref_count; }
+  bool is_valid () const { return ref_count > 0; }
 };
@@ -233,7 +233,7 @@ template <typename Type>
 static inline void hb_object_init (Type *obj)
 {
   obj->header.ref_count.init ();
-  obj->header.writable.set_relaxed (true);
+  obj->header.writable = true;
   obj->header.user_data.init ();
 }
 template <typename Type>
@@ -244,12 +244,12 @@ static inline bool hb_object_is_valid (const Type *obj)
 template <typename Type>
 static inline bool hb_object_is_immutable (const Type *obj)
 {
-  return !obj->header.writable.get_relaxed ();
+  return !obj->header.writable;
 }
 template <typename Type>
 static inline void hb_object_make_immutable (const Type *obj)
 {
-  obj->header.writable.set_relaxed (false);
+  obj->header.writable = false;
 }
 template <typename Type>
 static inline Type *hb_object_reference (Type *obj)

@@ -276,7 +276,7 @@ struct indic_shape_plan_t
 {
   bool load_virama_glyph (hb_font_t *font, hb_codepoint_t *pglyph) const
   {
-    hb_codepoint_t glyph = virama_glyph.get_relaxed ();
+    hb_codepoint_t glyph = virama_glyph;
     if (unlikely (glyph == (hb_codepoint_t) -1))
     {
       if (!config->virama || !font->get_nominal_glyph (config->virama, &glyph))
@@ -286,7 +286,7 @@ struct indic_shape_plan_t
       /* Our get_nominal_glyph() function needs a font, so we can't get the virama glyph
        * during shape planning... Instead, overwrite it here. */
-      virama_glyph.set_relaxed ((int) glyph);
+      virama_glyph = (int) glyph;
     }

     *pglyph = glyph;
@@ -330,7 +330,7 @@ data_create_indic (const hb_ot_shape_plan_t *plan)
 #ifndef HB_NO_UNISCRIBE_BUG_COMPATIBLE
   indic_plan->uniscribe_bug_compatible = hb_options ().uniscribe_bug_compatible;
 #endif
-  indic_plan->virama_glyph.set_relaxed (-1);
+  indic_plan->virama_glyph = -1;

   /* Use zero-context would_substitute() matching for new-spec of the main
    * Indic scripts, and scripts with one spec only, but not for old-specs.
@@ -992,7 +992,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan,
        * class of I_Cat(H) is desired but has been lost. */
       /* We don't call load_virama_glyph(), since we know it's already
        * loaded. */
-      hb_codepoint_t virama_glyph = indic_plan->virama_glyph.get_relaxed ();
+      hb_codepoint_t virama_glyph = indic_plan->virama_glyph;
       if (virama_glyph)
       {
         for (unsigned int i = start; i < end; i++)

@@ -307,12 +307,12 @@ hb_ot_tags_from_language (const char *lang_str,
     hb_tag_t lang_tag = hb_tag_from_string (lang_str, first_len);

     static hb_atomic_int_t last_tag_idx; /* Poor man's cache. */
-    unsigned tag_idx = last_tag_idx.get_relaxed ();
+    unsigned tag_idx = last_tag_idx;

     if (likely (tag_idx < ot_languages_len && ot_languages[tag_idx].language == lang_tag) ||
         hb_sorted_array (ot_languages, ot_languages_len).bfind (lang_tag, &tag_idx))
     {
-      last_tag_idx.set_relaxed (tag_idx);
+      last_tag_idx = tag_idx;
       unsigned int i;
       while (tag_idx != 0 &&
              ot_languages[tag_idx].language == ot_languages[tag_idx - 1].language)

@@ -94,7 +94,7 @@ hb_face_t::load_num_glyphs () const
   ret = hb_max (ret, load_num_glyphs_from_maxp (this));

-  num_glyphs.set_relaxed (ret);
+  num_glyphs = ret;
   return ret;
 }
@@ -102,7 +102,7 @@ unsigned int
 hb_face_t::load_upem () const
 {
   unsigned int ret = table.head->get_upem ();
-  upem.set_relaxed (ret);
+  upem = ret;
   return ret;
 }