Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/percpu-rwsem.c       | 11 ++++++++---
-rw-r--r--	kernel/locking/qspinlock_paravirt.h |  2 +-
-rw-r--r--	kernel/locking/rtmutex.c            |  9 +++++----
-rw-r--r--	kernel/locking/rwsem.c              |  6 +++---
4 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 185bd1c906..6083883c4f 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -223,9 +223,10 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem)
 
 void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
 {
+	bool contended = false;
+
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
-	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
 
 	/* Notify readers to take the slow path. */
 	rcu_sync_enter(&sem->rss);
@@ -234,8 +235,11 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
 	 * Try set sem->block; this provides writer-writer exclusion.
 	 * Having sem->block set makes new readers block.
 	 */
-	if (!__percpu_down_write_trylock(sem))
+	if (!__percpu_down_write_trylock(sem)) {
+		trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
 		percpu_rwsem_wait(sem, /* .reader = */ false);
+		contended = true;
+	}
 
 	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */
 
@@ -247,7 +251,8 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
 
 	/* Wait for all active readers to complete. */
 	rcuwait_wait_event(&sem->writer, readers_active_check(sem),
 			   TASK_UNINTERRUPTIBLE);
-	trace_contention_end(sem, 0);
+	if (contended)
+		trace_contention_end(sem, 0);
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 6a0184e9c2..ae2b12f68b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -294,8 +294,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 {
 	struct pv_node *pn = (struct pv_node *)node;
 	struct pv_node *pp = (struct pv_node *)prev;
+	bool __maybe_unused wait_early;
 	int loop;
-	bool wait_early;
 
 	for (;;) {
 		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4a10e8c16f..88d08eeb8b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -237,12 +237,13 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
  */
 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
-	unsigned long owner, *p = (unsigned long *) &lock->owner;
+	unsigned long *p = (unsigned long *) &lock->owner;
+	unsigned long owner, new;
 
+	owner = READ_ONCE(*p);
 	do {
-		owner = *p;
-	} while (cmpxchg_relaxed(p, owner,
-			owner | RT_MUTEX_HAS_WAITERS) != owner);
+		new = owner | RT_MUTEX_HAS_WAITERS;
+	} while (!try_cmpxchg_relaxed(p, &owner, new));
 
 	/*
 	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
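The rtmutex hunk above converts an open-coded cmpxchg() loop into the try_cmpxchg() form, which refreshes the expected value only when the exchange fails instead of re-reading *p on every iteration. Below is a minimal user-space sketch of the same pattern, assuming the GCC/Clang __atomic_compare_exchange_n() builtin (which, like the kernel's try_cmpxchg_relaxed(), writes the current value back into the "expected" argument on failure); the names set_waiters_bit and HAS_WAITERS are illustrative, not kernel API.

/*
 * Sketch of the try_cmpxchg() loop from mark_rt_mutex_waiters(),
 * written against the GCC/Clang atomic builtins. Hypothetical names;
 * compile with: gcc -O2 sketch.c
 */
#include <stdio.h>

#define HAS_WAITERS 1UL

static void set_waiters_bit(unsigned long *p)
{
	unsigned long owner, new;

	/* One explicit load up front... */
	owner = __atomic_load_n(p, __ATOMIC_RELAXED);
	do {
		new = owner | HAS_WAITERS;
		/* ...on failure, 'owner' is updated in place with the
		 * current value of *p, so no re-read is needed here. */
	} while (!__atomic_compare_exchange_n(p, &owner, new,
					      /* weak */ 0,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}

int main(void)
{
	unsigned long owner = 0x1000;	/* stand-in for an owner pointer */

	set_waiters_bit(&owner);
	printf("owner = %#lx\n", owner);	/* prints 0x1001 */
	return 0;
}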
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 2340b6d90e..c6d17aee42 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -35,7 +35,7 @@
 /*
  * The least significant 2 bits of the owner value has the following
  * meanings when set.
- * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
+ * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint)
 * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
@@ -1002,8 +1002,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 
 	/*
 	 * To prevent a constant stream of readers from starving a sleeping
-	 * waiter, don't attempt optimistic lock stealing if the lock is
-	 * currently owned by readers.
+	 * writer, don't attempt optimistic lock stealing if the lock is
+	 * very likely owned by readers.
 	 */
 	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
 	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
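The percpu-rwsem change at the top of this diff enforces a pairing rule on the contention tracepoints: trace_contention_end() should fire only on paths where trace_contention_begin() was emitted, i.e. only when the trylock actually failed. Below is a compact user-space sketch of that contended-flag pattern; the trylock/wait stubs and the trace_*() printouts are hypothetical placeholders, not the kernel's tracepoint machinery.

/*
 * Sketch of the 'contended' flag pattern from percpu_down_write().
 * All lock and trace functions here are stubs for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool trylock(void)		{ return false; /* force the slow path */ }
static void wait_for_lock(void)		{ }
static void trace_contention_begin(void)	{ puts("contention_begin"); }
static void trace_contention_end(void)		{ puts("contention_end"); }

static void down_write(void)
{
	bool contended = false;

	if (!trylock()) {
		trace_contention_begin();	/* only on the contended path */
		wait_for_lock();
		contended = true;
	}

	/* ... remainder of the acquisition ... */

	if (contended)		/* keep begin/end strictly paired */
		trace_contention_end();
}

int main(void)
{
	down_write();
	return 0;
}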