author     Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:29:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:37:28 +0000
commit     a7283ab143d4e95e8f5f22b58c61cb4e2f604749 (patch)
tree       3ec5165ac7f1299f5c0dc3e41d7560a06e6267f5 /include/spinlock
parent     Adding debian version 0.6.0-2. (diff)
download   ck-a7283ab143d4e95e8f5f22b58c61cb4e2f604749.tar.xz
           ck-a7283ab143d4e95e8f5f22b58c61cb4e2f604749.zip
Merging upstream version 0.7.1 (Closes: #991419).
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/spinlock')
-rw-r--r--  include/spinlock/dec.h   |  3
-rw-r--r--  include/spinlock/fas.h   |  9
-rw-r--r--  include/spinlock/hclh.h  | 12
3 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/include/spinlock/dec.h b/include/spinlock/dec.h
index 11d36dd..3e36bf7 100644
--- a/include/spinlock/dec.h
+++ b/include/spinlock/dec.h
@@ -111,7 +111,8 @@ ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
 		if (r == true)
 			break;
 
-		ck_backoff_eb(&backoff);
+		while (ck_pr_load_uint(&lock->value) != 1)
+			ck_backoff_eb(&backoff);
 	}
 
 	ck_pr_fence_lock();
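
The hunk above changes the contended path of the backoff lock: instead of calling ck_backoff_eb() unconditionally after every failed acquisition, the waiter now spins on a plain load and only backs off while the lock is still observed held (value != 1). The sketch below illustrates that pattern with portable C11 atomics; the names (toy_eb_lock, toy_backoff) and the exchange-based acquisition are invented for the example and are not ck's decrement-based lock.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_eb_lock {
	atomic_bool value;	/* false == unlocked */
};

/* Bounded busy-wait whose length roughly doubles on every call. */
static void
toy_backoff(unsigned int *ceiling)
{
	for (volatile unsigned int i = 0; i < *ceiling; i++)
		;
	if (*ceiling < (1U << 16))
		*ceiling <<= 1;
}

static void
toy_eb_lock_acquire(struct toy_eb_lock *lock)
{
	unsigned int ceiling = 1;

	while (atomic_exchange_explicit(&lock->value, true,
	    memory_order_acquire) == true) {
		/* Back off only while the lock is still observed held,
		 * mirroring the loop now wrapped around ck_backoff_eb(). */
		while (atomic_load_explicit(&lock->value,
		    memory_order_relaxed) == true)
			toy_backoff(&ceiling);
	}
}

static void
toy_eb_lock_release(struct toy_eb_lock *lock)
{
	atomic_store_explicit(&lock->value, false, memory_order_release);
}

Gating the backoff on an observed-held lock keeps waiters off the coherence-expensive atomic operation until a release has actually been seen, which is the point of the change.
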
diff --git a/include/spinlock/fas.h b/include/spinlock/fas.h
index 4e6c123..bfe91fe 100644
--- a/include/spinlock/fas.h
+++ b/include/spinlock/fas.h
@@ -77,10 +77,11 @@ CK_CC_INLINE static void
 ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
 {
-	while (ck_pr_fas_uint(&lock->value, true) == true) {
-		while (ck_pr_load_uint(&lock->value) == true)
-			ck_pr_stall();
-	}
+	while (CK_CC_UNLIKELY(ck_pr_fas_uint(&lock->value, true) == true)) {
+		do {
+			ck_pr_stall();
+		} while (ck_pr_load_uint(&lock->value) == true);
+	}
 
 	ck_pr_fence_lock();
 	return;
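
The fas.h change keeps the test-and-test-and-set structure but marks the fetch-and-store failure path as unlikely and stalls at least once before re-reading the lock word, so a contended waiter never re-issues the atomic swap immediately after a failed one. A hedged sketch of the same ordering in plain C11 follows; toy_fas_lock and cpu_relax() are invented stand-ins (ck itself uses ck_pr_fas_uint and ck_pr_stall).

#include <stdatomic.h>
#include <stdbool.h>

struct toy_fas_lock {
	atomic_bool value;	/* false == unlocked */
};

static inline void
cpu_relax(void)
{
	/* Placeholder for a pause/yield hint such as ck_pr_stall(). */
}

static void
toy_fas_lock_acquire(struct toy_fas_lock *lock)
{
	/* Fast path: a single fetch-and-store normally succeeds. */
	while (atomic_exchange_explicit(&lock->value, true,
	    memory_order_acquire) == true) {
		/* Contended path: stall first, then spin on a read-only
		 * load so the cache line stays shared until release. */
		do {
			cpu_relax();
		} while (atomic_load_explicit(&lock->value,
		    memory_order_relaxed) == true);
	}
}

static void
toy_fas_lock_release(struct toy_fas_lock *lock)
{
	atomic_store_explicit(&lock->value, false, memory_order_release);
}
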
diff --git a/include/spinlock/hclh.h b/include/spinlock/hclh.h
index 296448b..ece56c6 100644
--- a/include/spinlock/hclh.h
+++ b/include/spinlock/hclh.h
@@ -81,6 +81,8 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 	thread->wait = true;
 	thread->splice = false;
 	thread->cluster_id = (*local_queue)->cluster_id;
+	/* Make sure previous->previous doesn't appear to be NULL */
+	thread->previous = *local_queue;
 
 	/* Serialize with respect to update of local queue. */
 	ck_pr_fence_store_atomic();
@@ -91,13 +93,15 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
 	/* Wait until previous thread from the local queue is done with lock. */
 	ck_pr_fence_load();
-	if (previous->previous != NULL &&
-	    previous->cluster_id == thread->cluster_id) {
-		while (ck_pr_load_uint(&previous->wait) == true)
+	if (previous->previous != NULL) {
+		while (ck_pr_load_uint(&previous->wait) == true &&
+		    ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+		    ck_pr_load_uint(&previous->splice) == false)
 			ck_pr_stall();
 
 		/* We're head of the global queue, we're done */
-		if (ck_pr_load_uint(&previous->splice) == false)
+		if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+		    ck_pr_load_uint(&previous->splice) == false)
 			return;
 	}
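
For context, both hclh.h hunks tighten how a waiter observes its predecessor: the predecessor pointer is published before the node is spliced onto the queue, and the spin loop now re-reads the predecessor's cluster_id and splice fields so the waiter notices when its predecessor has been handed off to the global queue. The queueing idea underneath is easiest to see in the flat CLH lock, sketched below with invented toy_clh_* names; this is an illustration of the plain CLH algorithm, not ck's hierarchical HCLH variant.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_clh_node {
	atomic_bool wait;	/* true from enqueue until the owner releases */
};

struct toy_clh_lock {
	/* Must be initialized to point at a dummy node with wait == false. */
	_Atomic(struct toy_clh_node *) tail;
};

/* Enqueue our node and spin on the predecessor's flag.  The returned
 * predecessor node becomes the caller's node for its next acquisition
 * (the usual CLH node-recycling convention). */
static struct toy_clh_node *
toy_clh_lock_acquire(struct toy_clh_lock *lock, struct toy_clh_node *node)
{
	struct toy_clh_node *previous;

	atomic_store_explicit(&node->wait, true, memory_order_relaxed);

	/* Splice our node onto the tail of the queue. */
	previous = atomic_exchange_explicit(&lock->tail, node,
	    memory_order_acq_rel);

	/* Wait until the previous owner clears its flag on release. */
	while (atomic_load_explicit(&previous->wait, memory_order_acquire))
		;	/* a pause hint such as ck_pr_stall() would go here */

	return previous;
}

static void
toy_clh_lock_release(struct toy_clh_node *node)
{
	atomic_store_explicit(&node->wait, false, memory_order_release);
}

Each thread owns one node, releases through the node it enqueued, and reuses the returned predecessor node for its next acquisition. The hierarchical variant in hclh.h additionally splices a local cluster's queue onto a global one, which is what the splice flag checked in the diff indicates.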