author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
commit    | 76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      | f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /kernel/locking/qrwlock.c
parent    | Initial commit. (diff)
download  | linux-76cb841cb886eef6b3bee341a2266c76578724ad.tar.xz
          | linux-76cb841cb886eef6b3bee341a2266c76578724ad.zip
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r-- | kernel/locking/qrwlock.c | 93
1 file changed, 93 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
new file mode 100644
index 000000000..16c09cda3
--- /dev/null
+++ b/kernel/locking/qrwlock.c
@@ -0,0 +1,93 @@
+/*
+ * Queued read/write locks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#include <linux/smp.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <asm/qrwlock.h>
+
+/**
+ * queued_read_lock_slowpath - acquire read lock of a queue rwlock
+ * @lock: Pointer to queue rwlock structure
+ */
+void queued_read_lock_slowpath(struct qrwlock *lock)
+{
+	/*
+	 * Readers come here when they cannot get the lock without waiting
+	 */
+	if (unlikely(in_interrupt())) {
+		/*
+		 * Readers in interrupt context will get the lock immediately
+		 * if the writer is just waiting (not holding the lock yet),
+		 * so spin with ACQUIRE semantics until the lock is available
+		 * without waiting in the queue.
+		 */
+		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
+		return;
+	}
+	atomic_sub(_QR_BIAS, &lock->cnts);
+
+	/*
+	 * Put the reader into the wait queue
+	 */
+	arch_spin_lock(&lock->wait_lock);
+	atomic_add(_QR_BIAS, &lock->cnts);
+
+	/*
+	 * The ACQUIRE semantics of the following spinning code ensure
+	 * that accesses can't leak upwards out of our subsequent critical
+	 * section in the case that the lock is currently held for write.
+	 */
+	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
+
+	/*
+	 * Signal the next one in queue to become queue head
+	 */
+	arch_spin_unlock(&lock->wait_lock);
+}
+EXPORT_SYMBOL(queued_read_lock_slowpath);
+
+/**
+ * queued_write_lock_slowpath - acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+void queued_write_lock_slowpath(struct qrwlock *lock)
+{
+	int cnts;
+
+	/* Put the writer into the wait queue */
+	arch_spin_lock(&lock->wait_lock);
+
+	/* Try to acquire the lock directly if no reader is present */
+	if (!atomic_read(&lock->cnts) &&
+	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
+		goto unlock;
+
+	/* Set the waiting flag to notify readers that a writer is pending */
+	atomic_add(_QW_WAITING, &lock->cnts);
+
+	/* When no more readers or writers, set the locked flag */
+	do {
+		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
+	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
+unlock:
+	arch_spin_unlock(&lock->wait_lock);
+}
+EXPORT_SYMBOL(queued_write_lock_slowpath);
```
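For readers following the locking scheme in the patch, the sketch below models the same counting protocol in user-space C11. It is an illustrative model under stated assumptions, not kernel code: `pthread_mutex_t` stands in for the `arch_spinlock_t` wait queue, plain spin loops stand in for `atomic_cond_read_acquire()`, the interrupt-context reader path is omitted, and the `model_*` names and `struct qrwlock_model` are hypothetical. The constants mirror `include/asm-generic/qrwlock.h` as of this version: the low byte holds writer state (`_QW_LOCKED`), bit 8 is the writer-waiting flag (`_QW_WAITING`), and each reader adds `_QR_BIAS` (1 << 9).

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Writer states and reader bias, as in include/asm-generic/qrwlock.h */
#define _QW_WAITING 0x100u	/* a writer is waiting            */
#define _QW_LOCKED  0x0ffu	/* a writer holds the lock        */
#define _QW_WMASK   0x1ffu	/* writer mask: waiting or locked */
#define _QR_BIAS    0x200u	/* one reader = 1 << 9            */

struct qrwlock_model {
	atomic_uint cnts;		/* reader count (high bits) + writer state (low byte) */
	pthread_mutex_t wait_lock;	/* stand-in for the arch_spinlock_t wait queue        */
};

static void model_read_lock(struct qrwlock_model *l)
{
	/* Fast path: optimistically add a reader; done if no writer is around. */
	unsigned int cnts = atomic_fetch_add_explicit(&l->cnts, _QR_BIAS,
						      memory_order_acquire);
	if (!(cnts & _QW_WMASK))
		return;

	/* Slow path (cf. queued_read_lock_slowpath): back out, join the
	 * queue, re-add the reader bias, and wait for the writer to leave. */
	atomic_fetch_sub_explicit(&l->cnts, _QR_BIAS, memory_order_relaxed);
	pthread_mutex_lock(&l->wait_lock);
	atomic_fetch_add_explicit(&l->cnts, _QR_BIAS, memory_order_relaxed);
	while (atomic_load_explicit(&l->cnts, memory_order_acquire) & _QW_LOCKED)
		;	/* spin: stand-in for atomic_cond_read_acquire() */
	pthread_mutex_unlock(&l->wait_lock);
}

static void model_read_unlock(struct qrwlock_model *l)
{
	atomic_fetch_sub_explicit(&l->cnts, _QR_BIAS, memory_order_release);
}

static void model_write_lock(struct qrwlock_model *l)
{
	unsigned int expected = 0;

	pthread_mutex_lock(&l->wait_lock);

	/* Acquire directly if no reader or writer is present. */
	if (atomic_compare_exchange_strong_explicit(&l->cnts, &expected,
						    _QW_LOCKED,
						    memory_order_acquire,
						    memory_order_relaxed))
		goto unlock;

	/* Publish the waiting flag so new readers divert to the slow path,
	 * then wait until only that flag remains and swap it for LOCKED. */
	atomic_fetch_add_explicit(&l->cnts, _QW_WAITING, memory_order_relaxed);
	do {
		expected = _QW_WAITING;
	} while (!atomic_compare_exchange_weak_explicit(&l->cnts, &expected,
							_QW_LOCKED,
							memory_order_acquire,
							memory_order_relaxed));
unlock:
	pthread_mutex_unlock(&l->wait_lock);
}

static void model_write_unlock(struct qrwlock_model *l)
{
	atomic_fetch_sub_explicit(&l->cnts, _QW_LOCKED, memory_order_release);
}

int main(void)
{
	struct qrwlock_model l = { 0, PTHREAD_MUTEX_INITIALIZER };

	model_read_lock(&l);
	model_read_lock(&l);	/* readers share the lock */
	model_read_unlock(&l);
	model_read_unlock(&l);

	model_write_lock(&l);	/* a writer gets it exclusively */
	model_write_unlock(&l);

	puts("qrwlock model: ok");
	return 0;
}
```

Compiled with `cc -pthread`, the demo exercises both paths single-threaded. The design point the model makes visible: once a writer publishes `_QW_WAITING`, new readers see the writer mask and divert into the queue, so the writer's spin for `cnts == _QW_WAITING` terminates as existing readers drain; in the kernel, interrupt-context readers are the deliberate exception to that fairness.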