Use new mutex in dmon to avoid possible lock-up

We rely on a single variable, _dmon.modify_watches, shared between threads to
ensure that we do not deadlock, with the dmon polling thread waiting indefinitely
while holding a lock.

To ensure that the polling thread sees modifications done to 'modify_watches',
we use an additional mutex that acts as a memory barrier.
This commit is contained in:
Francesco Abbate 2022-01-21 11:08:16 +01:00
parent f7193c4fa2
commit bc9f8a4075
1 changed file with 42 additions and 18 deletions

View File

@ -159,10 +159,6 @@ DMON_API_DECL void dmon_unwatch(dmon_watch_id id);
# define NOMINMAX
# endif
# include <windows.h>
# include <intrin.h>
# ifdef _MSC_VER
# pragma intrinsic(_InterlockedExchange)
# endif
#elif DMON_OS_LINUX
# ifndef __USE_MISC
# define __USE_MISC
@ -406,7 +402,8 @@ typedef struct dmon__state {
dmon__watch_state watches[DMON_MAX_WATCHES];
HANDLE thread_handle;
CRITICAL_SECTION mutex;
volatile LONG modify_watches;
volatile int modify_watches;
CRITICAL_SECTION modify_watches_mutex;
dmon__win32_event* events;
bool quit;
HANDLE wake_event;
@ -492,6 +489,13 @@ _DMON_PRIVATE void dmon__win32_process_events(void)
stb_sb_reset(_dmon.events);
}
/* Read _dmon.modify_watches under its dedicated mutex so that writes made by
   the API threads (dmon_watch/dmon_unwatch) are guaranteed visible to the
   polling thread — the critical section doubles as a memory barrier. */
static int dmon__safe_get_modify_watches() {
    int flag;
    EnterCriticalSection(&_dmon.modify_watches_mutex);
    flag = _dmon.modify_watches;
    LeaveCriticalSection(&_dmon.modify_watches_mutex);
    return flag;
}
_DMON_PRIVATE DWORD WINAPI dmon__thread(LPVOID arg)
{
_DMON_UNUSED(arg);
@ -502,7 +506,8 @@ _DMON_PRIVATE DWORD WINAPI dmon__thread(LPVOID arg)
uint64_t msecs_elapsed = 0;
while (!_dmon.quit) {
if (_dmon.modify_watches || !TryEnterCriticalSection(&_dmon.mutex)) {
if (dmon__safe_get_modify_watches() ||
!TryEnterCriticalSection(&_dmon.mutex)) {
Sleep(10);
continue;
}
@ -587,6 +592,7 @@ DMON_API_IMPL void dmon_init(void)
{
DMON_ASSERT(!_dmon_init);
InitializeCriticalSection(&_dmon.mutex);
InitializeCriticalSection(&_dmon.modify_watches_mutex);
_dmon.thread_handle =
CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)dmon__thread, NULL, 0, NULL);
@ -596,11 +602,20 @@ DMON_API_IMPL void dmon_init(void)
}
static void dmon__enter_critical_wakeup(void) {
_InterlockedExchange(&_dmon.modify_watches, 1);
EnterCriticalSection(&_dmon.modify_watches_mutex);
_dmon.modify_watches = 1;
if (TryEnterCriticalSection(&_dmon.mutex) == 0) {
SetEvent(_dmon.wake_event);
EnterCriticalSection(&_dmon.mutex);
}
LeaveCriticalSection(&_dmon.modify_watches_mutex);
}
/* Counterpart of dmon__enter_critical_wakeup: clear the modify_watches flag
   (under its own mutex, so the polling thread observes the store), then
   release the main watch mutex so the polling thread can resume. */
static void dmon__leave_critical_wakeup(void) {
    EnterCriticalSection(&_dmon.modify_watches_mutex);
    _dmon.modify_watches = 0;
    LeaveCriticalSection(&_dmon.modify_watches_mutex);

    LeaveCriticalSection(&_dmon.mutex);
}
DMON_API_IMPL void dmon_deinit(void)
@ -617,8 +632,9 @@ DMON_API_IMPL void dmon_deinit(void)
dmon__unwatch(&_dmon.watches[i]);
}
LeaveCriticalSection(&_dmon.mutex);
dmon__leave_critical_wakeup();
DeleteCriticalSection(&_dmon.mutex);
DeleteCriticalSection(&_dmon.modify_watches_mutex);
stb_sb_free(_dmon.events);
_dmon_init = false;
}
@ -665,19 +681,16 @@ DMON_API_IMPL dmon_watch_id dmon_watch(const char* rootdir,
!dmon__refresh_watch(watch)) {
dmon__unwatch(watch);
*error_code = DMON_ERROR_WATCH_DIR;
LeaveCriticalSection(&_dmon.mutex);
_InterlockedExchange(&_dmon.modify_watches, 0);
dmon__leave_critical_wakeup();
return dmon__make_id(0);
}
} else {
*error_code = DMON_ERROR_OPEN_DIR;
LeaveCriticalSection(&_dmon.mutex);
_InterlockedExchange(&_dmon.modify_watches, 0);
dmon__leave_critical_wakeup();
return dmon__make_id(0);
}
LeaveCriticalSection(&_dmon.mutex);
_InterlockedExchange(&_dmon.modify_watches, 0);
dmon__leave_critical_wakeup();
return dmon__make_id(id);
}
@ -696,8 +709,7 @@ DMON_API_IMPL void dmon_unwatch(dmon_watch_id id)
}
--_dmon.num_watches;
LeaveCriticalSection(&_dmon.mutex);
_InterlockedExchange(&_dmon.modify_watches, 0);
dmon__leave_critical_wakeup();
}
#elif DMON_OS_LINUX
@ -733,7 +745,8 @@ typedef struct dmon__state {
int num_watches;
pthread_t thread_handle;
pthread_mutex_t mutex;
int wait_flag;
volatile int wait_flag;
pthread_mutex_t wait_flag_mutex;
int wake_event_pipe[2];
bool quit;
} dmon__state;
@ -1013,6 +1026,13 @@ _DMON_PRIVATE void dmon__inotify_process_events(void)
stb_sb_reset(_dmon.events);
}
/* Read _dmon.wait_flag under its dedicated mutex; the lock/unlock pair acts
   as a memory barrier so the polling thread sees updates made by
   dmon__mutex_wakeup_lock on other threads. */
_DMON_PRIVATE int dmon__safe_get_wait_flag() {
    int flag;
    pthread_mutex_lock(&_dmon.wait_flag_mutex);
    flag = _dmon.wait_flag;
    pthread_mutex_unlock(&_dmon.wait_flag_mutex);
    return flag;
}
static void* dmon__thread(void* arg)
{
_DMON_UNUSED(arg);
@ -1028,7 +1048,9 @@ static void* dmon__thread(void* arg)
while (!_dmon.quit) {
nanosleep(&req, &rem);
if (_dmon.num_watches == 0 || _dmon.wait_flag == 1 || pthread_mutex_trylock(&_dmon.mutex) != 0) {
if (_dmon.num_watches == 0 ||
dmon__safe_get_wait_flag() ||
pthread_mutex_trylock(&_dmon.mutex) != 0) {
continue;
}
@ -1106,6 +1128,7 @@ static void* dmon__thread(void* arg)
}
_DMON_PRIVATE void dmon__mutex_wakeup_lock(void) {
pthread_mutex_lock(&_dmon.wait_flag_mutex);
_dmon.wait_flag = 1;
if (pthread_mutex_trylock(&_dmon.mutex) != 0) {
char send_char = 1;
@ -1113,6 +1136,7 @@ _DMON_PRIVATE void dmon__mutex_wakeup_lock(void) {
pthread_mutex_lock(&_dmon.mutex);
}
_dmon.wait_flag = 0;
pthread_mutex_unlock(&_dmon.wait_flag_mutex);
}
_DMON_PRIVATE void dmon__unwatch(dmon__watch_state* watch)