diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-07-24 09:54:23 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-07-24 09:54:44 +0000 |
commit | 836b47cb7e99a977c5a23b059ca1d0b5065d310e (patch) | |
tree | 1604da8f482d02effa033c94a84be42bc0c848c3 /src/libnetdata/locks/locks.c | |
parent | Releasing debian version 1.44.3-2. (diff) | |
download | netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.tar.xz netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.zip |
Merging upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r-- | src/libnetdata/locks/locks.c (renamed from libnetdata/locks/locks.c) | 213 |
1 file changed, 94 insertions, 119 deletions
diff --git a/libnetdata/locks/locks.c b/src/libnetdata/locks/locks.c index 625dd052c..d01ee29f1 100644 --- a/libnetdata/locks/locks.c +++ b/src/libnetdata/locks/locks.c @@ -19,64 +19,6 @@ #endif // NETDATA_TRACE_RWLOCKS // ---------------------------------------------------------------------------- -// automatic thread cancelability management, based on locks - -static __thread int netdata_thread_first_cancelability = 0; -static __thread int netdata_thread_nested_disables = 0; - -static __thread size_t netdata_locks_acquired_rwlocks = 0; -static __thread size_t netdata_locks_acquired_mutexes = 0; - -inline void netdata_thread_disable_cancelability(void) { - if(!netdata_thread_nested_disables) { - int old; - int ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old); - - if(ret != 0) - netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", - netdata_thread_tag(), ret); - - netdata_thread_first_cancelability = old; - } - - netdata_thread_nested_disables++; -} - -inline void netdata_thread_enable_cancelability(void) { - if(unlikely(netdata_thread_nested_disables < 1)) { - internal_fatal(true, "THREAD_CANCELABILITY: trying to enable cancelability, but it was not not disabled"); - - netdata_log_error("THREAD_CANCELABILITY: netdata_thread_enable_cancelability(): invalid thread cancelability count %d " - "on thread %s - results will be undefined - please report this!", - netdata_thread_nested_disables, netdata_thread_tag()); - - netdata_thread_nested_disables = 1; - } - - if(netdata_thread_nested_disables == 1) { - int old = 1; - int ret = pthread_setcancelstate(netdata_thread_first_cancelability, &old); - if(ret != 0) - netdata_log_error("THREAD_CANCELABILITY: pthread_setcancelstate() on thread %s returned error %d", - netdata_thread_tag(), - ret); - else { - if(old != PTHREAD_CANCEL_DISABLE) { - internal_fatal(true, "THREAD_CANCELABILITY: invalid old state cancelability"); - - netdata_log_error("THREAD_CANCELABILITY: 
netdata_thread_enable_cancelability(): old thread cancelability " - "on thread %s was changed, expected DISABLED (%d), found %s (%d) - please report this!", - netdata_thread_tag(), PTHREAD_CANCEL_DISABLE, - (old == PTHREAD_CANCEL_ENABLE) ? "ENABLED" : "UNKNOWN", - old); - } - } - } - - netdata_thread_nested_disables--; -} - -// ---------------------------------------------------------------------------- // mutex int __netdata_mutex_init(netdata_mutex_t *mutex) { @@ -94,27 +36,22 @@ int __netdata_mutex_destroy(netdata_mutex_t *mutex) { } int __netdata_mutex_lock(netdata_mutex_t *mutex) { - netdata_thread_disable_cancelability(); - int ret = pthread_mutex_lock(mutex); if(unlikely(ret != 0)) { - netdata_thread_enable_cancelability(); netdata_log_error("MUTEX_LOCK: failed to get lock (code %d)", ret); } else - netdata_locks_acquired_mutexes++; + nd_thread_mutex_locked(); return ret; } int __netdata_mutex_trylock(netdata_mutex_t *mutex) { - netdata_thread_disable_cancelability(); - int ret = pthread_mutex_trylock(mutex); if(ret != 0) - netdata_thread_enable_cancelability(); + ; else - netdata_locks_acquired_mutexes++; + nd_thread_mutex_locked(); return ret; } @@ -123,10 +60,8 @@ int __netdata_mutex_unlock(netdata_mutex_t *mutex) { int ret = pthread_mutex_unlock(mutex); if(unlikely(ret != 0)) netdata_log_error("MUTEX_LOCK: failed to unlock (code %d).", ret); - else { - netdata_locks_acquired_mutexes--; - netdata_thread_enable_cancelability(); - } + else + nd_thread_mutex_unlocked(); return ret; } @@ -226,65 +161,61 @@ int __netdata_rwlock_init(netdata_rwlock_t *rwlock) { } int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - int ret = pthread_rwlock_rdlock(&rwlock->rwlock_t); - if(unlikely(ret != 0)) { - netdata_thread_enable_cancelability(); + if(unlikely(ret != 0)) netdata_log_error("RW_LOCK: failed to obtain read lock (code %d)", ret); - } else - netdata_locks_acquired_rwlocks++; + nd_thread_rwlock_read_locked(); return 
ret; } int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - int ret = pthread_rwlock_wrlock(&rwlock->rwlock_t); - if(unlikely(ret != 0)) { + if(unlikely(ret != 0)) netdata_log_error("RW_LOCK: failed to obtain write lock (code %d)", ret); - netdata_thread_enable_cancelability(); - } else - netdata_locks_acquired_rwlocks++; + nd_thread_rwlock_write_locked(); return ret; } -int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) { +int __netdata_rwlock_rdunlock(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_unlock(&rwlock->rwlock_t); if(unlikely(ret != 0)) netdata_log_error("RW_LOCK: failed to release lock (code %d)", ret); - else { - netdata_thread_enable_cancelability(); - netdata_locks_acquired_rwlocks--; - } + else + nd_thread_rwlock_read_unlocked(); return ret; } -int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); +int __netdata_rwlock_wrunlock(netdata_rwlock_t *rwlock) { + int ret = pthread_rwlock_unlock(&rwlock->rwlock_t); + if(unlikely(ret != 0)) + netdata_log_error("RW_LOCK: failed to release lock (code %d)", ret); + else + nd_thread_rwlock_write_unlocked(); + return ret; +} + +int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) { int ret = pthread_rwlock_tryrdlock(&rwlock->rwlock_t); if(ret != 0) - netdata_thread_enable_cancelability(); + ; else - netdata_locks_acquired_rwlocks++; + nd_thread_rwlock_read_locked(); return ret; } int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) { - netdata_thread_disable_cancelability(); - int ret = pthread_rwlock_trywrlock(&rwlock->rwlock_t); if(ret != 0) - netdata_thread_enable_cancelability(); + ; else - netdata_locks_acquired_rwlocks++; + nd_thread_rwlock_write_locked(); return ret; } @@ -297,15 +228,11 @@ void spinlock_init(SPINLOCK *spinlock) { memset(spinlock, 0, sizeof(SPINLOCK)); } -void spinlock_lock(SPINLOCK *spinlock) { - static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 }; - +static inline 
void spinlock_lock_internal(SPINLOCK *spinlock) { #ifdef NETDATA_INTERNAL_CHECKS size_t spins = 0; #endif - netdata_thread_disable_cancelability(); - for(int i = 1; __atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) || __atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE) @@ -317,7 +244,7 @@ void spinlock_lock(SPINLOCK *spinlock) { #endif if(unlikely(i == 8)) { i = 0; - nanosleep(&ns, NULL); + tinysleep(); } } @@ -325,31 +252,62 @@ void spinlock_lock(SPINLOCK *spinlock) { #ifdef NETDATA_INTERNAL_CHECKS spinlock->spins += spins; - spinlock->locker_pid = gettid(); + spinlock->locker_pid = gettid_cached(); #endif + + nd_thread_spinlock_locked(); } -void spinlock_unlock(SPINLOCK *spinlock) { +static inline void spinlock_unlock_internal(SPINLOCK *spinlock) { #ifdef NETDATA_INTERNAL_CHECKS spinlock->locker_pid = 0; #endif __atomic_clear(&spinlock->locked, __ATOMIC_RELEASE); - netdata_thread_enable_cancelability(); -} -bool spinlock_trylock(SPINLOCK *spinlock) { - netdata_thread_disable_cancelability(); + nd_thread_spinlock_unlocked(); +} +static inline bool spinlock_trylock_internal(SPINLOCK *spinlock) { if(!__atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) && - !__atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE)) + !__atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE)) { // we got the lock + nd_thread_spinlock_locked(); return true; + } - // we didn't get the lock - netdata_thread_enable_cancelability(); return false; } +void spinlock_lock(SPINLOCK *spinlock) +{ + spinlock_lock_internal(spinlock); +} + +void spinlock_unlock(SPINLOCK *spinlock) +{ + spinlock_unlock_internal(spinlock); +} + +bool spinlock_trylock(SPINLOCK *spinlock) +{ + return spinlock_trylock_internal(spinlock); +} + +void spinlock_lock_cancelable(SPINLOCK *spinlock) +{ + spinlock_lock_internal(spinlock); +} + +void spinlock_unlock_cancelable(SPINLOCK *spinlock) +{ + spinlock_unlock_internal(spinlock); +} + +bool spinlock_trylock_cancelable(SPINLOCK *spinlock) +{ + return 
spinlock_trylock_internal(spinlock); +} + // ---------------------------------------------------------------------------- // rw_spinlock implementation @@ -359,11 +317,11 @@ void rw_spinlock_init(RW_SPINLOCK *rw_spinlock) { } void rw_spinlock_read_lock(RW_SPINLOCK *rw_spinlock) { - netdata_thread_disable_cancelability(); - spinlock_lock(&rw_spinlock->spinlock); __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED); spinlock_unlock(&rw_spinlock->spinlock); + + nd_thread_rwspinlock_read_locked(); } void rw_spinlock_read_unlock(RW_SPINLOCK *rw_spinlock) { @@ -375,12 +333,10 @@ void rw_spinlock_read_unlock(RW_SPINLOCK *rw_spinlock) { fatal("RW_SPINLOCK: readers is negative %d", x); #endif - netdata_thread_enable_cancelability(); + nd_thread_rwspinlock_read_unlocked(); } void rw_spinlock_write_lock(RW_SPINLOCK *rw_spinlock) { - static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 }; - size_t spins = 0; while(1) { spins++; @@ -391,21 +347,24 @@ void rw_spinlock_write_lock(RW_SPINLOCK *rw_spinlock) { // Busy wait until all readers have released their locks. 
spinlock_unlock(&rw_spinlock->spinlock); - nanosleep(&ns, NULL); + tinysleep(); } (void)spins; + + nd_thread_rwspinlock_write_locked(); } void rw_spinlock_write_unlock(RW_SPINLOCK *rw_spinlock) { spinlock_unlock(&rw_spinlock->spinlock); + nd_thread_rwspinlock_write_unlocked(); } bool rw_spinlock_tryread_lock(RW_SPINLOCK *rw_spinlock) { if(spinlock_trylock(&rw_spinlock->spinlock)) { __atomic_add_fetch(&rw_spinlock->readers, 1, __ATOMIC_RELAXED); spinlock_unlock(&rw_spinlock->spinlock); - netdata_thread_disable_cancelability(); + nd_thread_rwspinlock_read_locked(); return true; } @@ -416,6 +375,7 @@ bool rw_spinlock_trywrite_lock(RW_SPINLOCK *rw_spinlock) { if(spinlock_trylock(&rw_spinlock->spinlock)) { if (__atomic_load_n(&rw_spinlock->readers, __ATOMIC_RELAXED) == 0) { // No readers, we've successfully acquired the write lock + nd_thread_rwspinlock_write_locked(); return true; } else { @@ -548,7 +508,22 @@ int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *fun return ret; } -int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, +int netdata_rwlock_rdunlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, + const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { + + netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock); + + if(unlikely(!locker)) + fatal("UNLOCK WITHOUT LOCK"); + + int ret = __netdata_rwlock_rdunlock(rwlock); + if(likely(!ret)) + remove_rwlock_locker(file, function, line, rwlock, locker); + + return ret; +} + +int netdata_rwlock_wrunlock_debug(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) { netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock); @@ -556,7 +531,7 @@ int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *fun if(unlikely(!locker)) fatal("UNLOCK WITHOUT LOCK"); - 
int ret = __netdata_rwlock_unlock(rwlock); + int ret = __netdata_rwlock_wrunlock(rwlock); if(likely(!ret)) remove_rwlock_locker(file, function, line, rwlock, locker); |