Add a mutex to object header
Removes one more static initialization. A few more to go.
This commit is contained in:
parent a2b471df82
commit 0e253e97af
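At its core the change trades one process-wide lock, whose hb_static_mutex_t constructor had to run during static initialization, for a mutex that lives inside each object header next to the reference count. A simplified before/after sketch, condensed from the hunks below (not the literal sources):

/* Before: hb-common.cc kept a single global lock, and hb_static_mutex_t
 * has a constructor, i.e. a static initializer. */
static hb_static_mutex_t user_data_lock;

/* After: every object carries its own lock, and statically allocated
 * objects stay plain aggregates initialized by HB_OBJECT_HEADER_STATIC. */
struct hb_object_header_t
{
  hb_reference_count_t ref_count;
  hb_mutex_t           lock;
  hb_user_data_array_t user_data;
};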
@@ -314,47 +314,41 @@ hb_script_get_horizontal_direction (hb_script_t script)

 /* hb_user_data_array_t */

-
-/* NOTE: Currently we use a global lock for user_data access
- * threadsafety.  If one day we add a mutex to any object, we
- * should switch to using that insted for these too.
- */
-
-static hb_static_mutex_t user_data_lock;
-
 bool
 hb_user_data_array_t::set (hb_user_data_key_t *key,
                            void *              data,
                            hb_destroy_func_t   destroy,
-                           hb_bool_t           replace)
+                           hb_bool_t           replace,
+                           hb_mutex_t          &lock)
 {
   if (!key)
     return false;

   if (replace) {
     if (!data && !destroy) {
-      items.remove (key, user_data_lock);
+      items.remove (key, lock);
       return true;
     }
   }
   hb_user_data_item_t item = {key, data, destroy};
-  bool ret = !!items.replace_or_insert (item, user_data_lock, replace);
+  bool ret = !!items.replace_or_insert (item, lock, replace);

   return ret;
 }

 void *
-hb_user_data_array_t::get (hb_user_data_key_t *key)
+hb_user_data_array_t::get (hb_user_data_key_t *key,
+                           hb_mutex_t          &lock)
 {
   hb_user_data_item_t item = {NULL };

-  return items.find (key, &item, user_data_lock) ? item.data : NULL;
+  return items.find (key, &item, lock) ? item.data : NULL;
 }

 void
-hb_user_data_array_t::finish (void)
+hb_user_data_array_t::finish (hb_mutex_t &lock)
 {
-  items.finish (user_data_lock);
+  items.finish (lock);
 }

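hb_user_data_array_t no longer reaches for a file-scope lock; callers hand in whichever mutex protects the owning object. A condensed sketch of the new call shape, modelled on the forwarding methods added to hb_object_header_t later in this diff (the free function and its name are illustrative, not part of the commit):

/* Illustrative wrapper: the object header passes its own lock member
 * along with every user-data operation. */
static inline hb_bool_t
set_user_data_sketch (hb_object_header_t *obj,
                      hb_user_data_key_t *key,
                      void               *data,
                      hb_destroy_func_t   destroy,
                      hb_bool_t           replace)
{
  return obj->user_data.set (key, data, destroy, replace, obj->lock);
}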
@@ -50,7 +50,7 @@ typedef CRITICAL_SECTION hb_mutex_impl_t;
 #define hb_mutex_impl_init(M)   InitializeCriticalSection (M)
 #define hb_mutex_impl_lock(M)   EnterCriticalSection (M)
 #define hb_mutex_impl_unlock(M) LeaveCriticalSection (M)
-#define hb_mutex_impl_free(M)   DeleteCriticalSection (M)
+#define hb_mutex_impl_finish(M) DeleteCriticalSection (M)


 #elif !defined(HB_NO_MT) && defined(__APPLE__)
@@ -61,7 +61,7 @@ typedef pthread_mutex_t hb_mutex_impl_t;
 #define hb_mutex_impl_init(M)   pthread_mutex_init (M, NULL)
 #define hb_mutex_impl_lock(M)   pthread_mutex_lock (M)
 #define hb_mutex_impl_unlock(M) pthread_mutex_unlock (M)
-#define hb_mutex_impl_free(M)   pthread_mutex_destroy (M)
+#define hb_mutex_impl_finish(M) pthread_mutex_destroy (M)


 #elif !defined(HB_NO_MT) && defined(HAVE_GLIB)
@@ -72,7 +72,7 @@ typedef GStaticMutex hb_mutex_impl_t;
 #define hb_mutex_impl_init(M)   g_static_mutex_init (M)
 #define hb_mutex_impl_lock(M)   g_static_mutex_lock (M)
 #define hb_mutex_impl_unlock(M) g_static_mutex_unlock (M)
-#define hb_mutex_impl_free(M)   g_static_mutex_free (M)
+#define hb_mutex_impl_finish(M) g_static_mutex_free (M)


 #else
@@ -83,11 +83,12 @@ typedef volatile int hb_mutex_impl_t;
 #define hb_mutex_impl_init(M)   ((void) (*(M) = 0))
 #define hb_mutex_impl_lock(M)   ((void) (*(M) = 1))
 #define hb_mutex_impl_unlock(M) ((void) (*(M) = 0))
-#define hb_mutex_impl_free(M)   ((void) (*(M) = 2))
+#define hb_mutex_impl_finish(M) ((void) (*(M) = 2))

 #endif


+#define HB_MUTEX_INIT {HB_MUTEX_IMPL_INIT}
 struct hb_mutex_t
 {
   hb_mutex_impl_t m;
@@ -95,20 +96,14 @@ struct hb_mutex_t
   inline void init   (void) { hb_mutex_impl_init (&m); }
   inline void lock   (void) { hb_mutex_impl_lock (&m); }
   inline void unlock (void) { hb_mutex_impl_unlock (&m); }
-  inline void free   (void) { hb_mutex_impl_free (&m); }
+  inline void finish (void) { hb_mutex_impl_finish (&m); }
 };

-#define HB_MUTEX_INIT {HB_MUTEX_IMPL_INIT}
-#define hb_mutex_init(M)   (M)->init ()
-#define hb_mutex_lock(M)   (M)->lock ()
-#define hb_mutex_unlock(M) (M)->unlock ()
-#define hb_mutex_free(M)   (M)->free ()
-

 struct hb_static_mutex_t : hb_mutex_t
 {
   hb_static_mutex_t (void) { this->init (); }
-  ~hb_static_mutex_t (void) { this->free (); }
+  ~hb_static_mutex_t (void) { this->finish (); }

 private:
   NO_COPY (hb_static_mutex_t);
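Two things change in the mutex header: the free () operation becomes finish (), matching the init ()/finish () pairing used by the other private types, and HB_MUTEX_INIT moves next to hb_mutex_t. Because hb_mutex_t is a plain aggregate, the brace initializer lets a mutex be embedded in a larger statically initialized object with no constructor running at load time. A minimal sketch under that assumption (the struct and variable names are made up for illustration):

/* Illustrative only: a static aggregate containing an hb_mutex_t,
 * initialized entirely by braces rather than by a constructor. */
struct example_static_t
{
  hb_mutex_t lock;
  int        flags;
};

static example_static_t example_nil = {HB_MUTEX_INIT, 0};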
@@ -47,16 +47,16 @@

 /* reference_count */

-#define HB_REFERENCE_COUNT_INVALID_VALUE ((hb_atomic_int_t) -1)
-#define HB_REFERENCE_COUNT_INVALID {HB_REFERENCE_COUNT_INVALID_VALUE}
 struct hb_reference_count_t
 {
   hb_atomic_int_t ref_count;

+#define HB_REFERENCE_COUNT_INVALID_VALUE ((hb_atomic_int_t) -1)
+#define HB_REFERENCE_COUNT_INVALID {HB_REFERENCE_COUNT_INVALID_VALUE}
+
-  inline void init (int v) { const_cast<hb_atomic_int_t &> (ref_count) = v; }
+  inline void init (int v) { ref_count = v; }
   inline int inc (void) { return hb_atomic_int_add (const_cast<hb_atomic_int_t &> (ref_count), 1); }
   inline int dec (void) { return hb_atomic_int_add (const_cast<hb_atomic_int_t &> (ref_count), -1); }
+  inline void finish (void) { ref_count = HB_REFERENCE_COUNT_INVALID_VALUE; }

   inline bool is_invalid (void) const { return ref_count == HB_REFERENCE_COUNT_INVALID_VALUE; }

@@ -65,6 +65,7 @@ struct hb_reference_count_t

 /* user_data */

+#define HB_USER_DATA_ARRAY_INIT {HB_LOCKABLE_SET_INIT}
 struct hb_user_data_array_t
 {
   struct hb_user_data_item_t {
@@ -78,18 +79,20 @@ struct hb_user_data_array_t
     void finish (void) { if (destroy) destroy (data); }
   };

-  hb_lockable_set_t<hb_user_data_item_t, hb_static_mutex_t> items;
+  hb_lockable_set_t<hb_user_data_item_t, hb_mutex_t> items;

   inline void init (void) { items.init (); }

   HB_INTERNAL bool set (hb_user_data_key_t *key,
                         void *              data,
                         hb_destroy_func_t   destroy,
-                        hb_bool_t           replace);
+                        hb_bool_t           replace,
+                        hb_mutex_t          &lock);

-  HB_INTERNAL void *get (hb_user_data_key_t *key);
+  HB_INTERNAL void *get (hb_user_data_key_t *key,
+                         hb_mutex_t          &lock);

-  HB_INTERNAL void finish (void);
+  HB_INTERNAL void finish (hb_mutex_t &lock);
 };


@@ -98,9 +101,10 @@ struct hb_user_data_array_t
 struct hb_object_header_t
 {
   hb_reference_count_t ref_count;
+  hb_mutex_t           lock;
   hb_user_data_array_t user_data;

-#define HB_OBJECT_HEADER_STATIC {HB_REFERENCE_COUNT_INVALID}
+#define HB_OBJECT_HEADER_STATIC {HB_REFERENCE_COUNT_INVALID, HB_MUTEX_INIT, HB_USER_DATA_ARRAY_INIT}

   static inline void *create (unsigned int size) {
     hb_object_header_t *obj = (hb_object_header_t *) calloc (1, size);
@@ -113,6 +117,7 @@ struct hb_object_header_t

   inline void init (void) {
     ref_count.init (1);
+    lock.init ();
     user_data.init ();
   }

@@ -132,9 +137,9 @@ struct hb_object_header_t
     if (ref_count.dec () != 1)
       return false;

-    ref_count.init (HB_REFERENCE_COUNT_INVALID_VALUE);
-
-    user_data.finish ();
+    ref_count.finish (); /* Do this before user_data */
+    user_data.finish (lock);
+    lock.finish ();

     return true;
   }
@@ -146,11 +151,14 @@ struct hb_object_header_t
     if (unlikely (!this || this->is_inert ()))
       return false;

-    return user_data.set (key, data, destroy_func, replace);
+    return user_data.set (key, data, destroy_func, replace, lock);
   }

   inline void *get_user_data (hb_user_data_key_t *key) {
-    return user_data.get (key);
+    if (unlikely (!this || this->is_inert ()))
+      return NULL;
+
+    return user_data.get (key, lock);
   }

   inline void trace (const char *function) const {
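With the mutex stored in hb_object_header_t, initialization and teardown now happen in a fixed order: destroy () invalidates the reference count first (so the object reads as inert), finishes the user data while the lock is still valid, and only then finishes the lock; get_user_data () also gains the same inert-object guard that set_user_data () already had. A compressed sketch of that lifecycle; the function is illustrative, but the individual calls and their order come from the hunks above:

/* Illustrative lifecycle of one object header after this commit. */
static void
object_header_lifecycle_sketch (hb_object_header_t *obj)
{
  /* construction */
  obj->ref_count.init (1);
  obj->lock.init ();
  obj->user_data.init ();

  /* ... object in use; user-data access is serialized by obj->lock ... */

  /* destruction, in the order destroy () now uses */
  obj->ref_count.finish (); /* before user_data */
  obj->user_data.finish (obj->lock);
  obj->lock.finish ();
}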
@@ -239,9 +239,10 @@ typedef int (*hb_compare_func_t) (const void *, const void *);
 /* arrays and maps */


+#define HB_PREALLOCED_ARRAY_INIT {0}
 template <typename Type, unsigned int StaticSize>
-struct hb_prealloced_array_t {
-
+struct hb_prealloced_array_t
+{
   unsigned int len;
   unsigned int allocated;
   Type *array;
@@ -342,14 +343,12 @@ struct hb_prealloced_array_t {
   }
 };

-template <typename Type>
-struct hb_array_t : hb_prealloced_array_t<Type, 2> {};
-

+#define HB_LOCKABLE_SET_INIT {HB_PREALLOCED_ARRAY_INIT}
 template <typename item_t, typename lock_t>
 struct hb_lockable_set_t
 {
-  hb_array_t <item_t> items;
+  hb_prealloced_array_t <item_t, 2> items;

   inline void init (void) { items.init (); }

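The hb-private.hh hunks exist so that the user-data array can be statically initialized as well: hb_array_t is dropped in favor of using hb_prealloced_array_t<item_t, 2> directly, and each container gains a brace-initializer macro. Chained together, these macros turn the whole object header into one nested brace list, which is what lets HB_OBJECT_HEADER_STATIC stand in for the static constructors this series is removing. A sketch of the nesting; the variable name is illustrative, and HB_MUTEX_IMPL_INIT is assumed to be the per-backend initializer defined elsewhere in hb-mutex-private.hh:

/* Illustrative expansion of the new initializer chain. */
static hb_object_header_t example_nil_header = HB_OBJECT_HEADER_STATIC;
/* HB_OBJECT_HEADER_STATIC
 *   = {HB_REFERENCE_COUNT_INVALID, HB_MUTEX_INIT, HB_USER_DATA_ARRAY_INIT}
 *   = {{HB_REFERENCE_COUNT_INVALID_VALUE},   ref_count
 *      {HB_MUTEX_IMPL_INIT},                 lock
 *      {{HB_PREALLOCED_ARRAY_INIT}}}         user_data.items
 *   = {{(hb_atomic_int_t) -1}, {HB_MUTEX_IMPL_INIT}, {{{0}}}}
 */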