Diffstat (limited to 'debian/patches-rt/0273-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch')
-rw-r--r-- | debian/patches-rt/0273-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch | 245
1 file changed, 245 insertions, 0 deletions
diff --git a/debian/patches-rt/0273-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch b/debian/patches-rt/0273-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch
new file mode 100644
index 000000000..391c7c48c
--- /dev/null
+++ b/debian/patches-rt/0273-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch
@@ -0,0 +1,245 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 26 Jun 2019 17:44:21 +0200
+Subject: [PATCH 273/342] Revert "rtmutex: Handle the various new futex race
+ conditions"
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3cc6426a21b537ce05a7acb6a499a205559ae766
+
+[ Upstream commit 9e0265c21af4d6388d47dcd5ce20f76ec3a2e468 ]
+
+Drop the RT fixup, the futex code will be changed to avoid the need for
+the workaround.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+---
+ kernel/futex.c                  | 75 ++++++---------------------------
+ kernel/locking/rtmutex.c        | 36 +++-------------
+ kernel/locking/rtmutex_common.h |  2 -
+ 3 files changed, 20 insertions(+), 93 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 3449a716a378..bcd01b5a47df 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2255,16 +2255,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 			requeue_pi_wake_futex(this, &key2, hb2);
+ 			drop_count++;
+ 			continue;
+-		} else if (ret == -EAGAIN) {
+-			/*
+-			 * Waiter was woken by timeout or
+-			 * signal and has set pi_blocked_on to
+-			 * PI_WAKEUP_INPROGRESS before we
+-			 * tried to enqueue it on the rtmutex.
+-			 */
+-			this->pi_state = NULL;
+-			put_pi_state(pi_state);
+-			continue;
+ 		} else if (ret) {
+ 			/*
+ 			 * rt_mutex_start_proxy_lock() detected a
+@@ -3343,7 +3333,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ {
+ 	struct hrtimer_sleeper timeout, *to = NULL;
+ 	struct rt_mutex_waiter rt_waiter;
+-	struct futex_hash_bucket *hb, *hb2;
++	struct futex_hash_bucket *hb;
+ 	union futex_key key2 = FUTEX_KEY_INIT;
+ 	struct futex_q q = futex_q_init;
+ 	int res, ret;
+@@ -3401,55 +3391,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ 	futex_wait_queue_me(hb, &q, to);
+ 
+-	/*
+-	 * On RT we must avoid races with requeue and trying to block
+-	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
+-	 * serializing access to pi_blocked_on with pi_lock.
+-	 */
+-	raw_spin_lock_irq(&current->pi_lock);
+-	if (current->pi_blocked_on) {
+-		/*
+-		 * We have been requeued or are in the process of
+-		 * being requeued.
+-		 */
+-		raw_spin_unlock_irq(&current->pi_lock);
+-	} else {
+-		/*
+-		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+-		 * prevents a concurrent requeue from moving us to the
+-		 * uaddr2 rtmutex. After that we can safely acquire
+-		 * (and possibly block on) hb->lock.
+-		 */
+-		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+-		raw_spin_unlock_irq(&current->pi_lock);
+-
+-		spin_lock(&hb->lock);
+-
+-		/*
+-		 * Clean up pi_blocked_on. We might leak it otherwise
+-		 * when we succeeded with the hb->lock in the fast
+-		 * path.
+-		 */
+-		raw_spin_lock_irq(&current->pi_lock);
+-		current->pi_blocked_on = NULL;
+-		raw_spin_unlock_irq(&current->pi_lock);
+-
+-		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+-		spin_unlock(&hb->lock);
+-		if (ret)
+-			goto out_put_keys;
+-	}
++	spin_lock(&hb->lock);
++	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++	spin_unlock(&hb->lock);
++	if (ret)
++		goto out_put_keys;
+ 
+ 	/*
+-	 * In order to be here, we have either been requeued, are in
+-	 * the process of being requeued, or requeue successfully
+-	 * acquired uaddr2 on our behalf. If pi_blocked_on was
+-	 * non-null above, we may be racing with a requeue. Do not
+-	 * rely on q->lock_ptr to be hb2->lock until after blocking on
+-	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
+-	 * reference and incremented our key2 reference count.
++	 * In order for us to be here, we know our q.key == key2, and since
++	 * we took the hb->lock above, we also know that futex_requeue() has
++	 * completed and we no longer have to concern ourselves with a wakeup
++	 * race with the atomic proxy lock acquisition by the requeue code. The
++	 * futex_requeue dropped our key1 reference and incremented our key2
++	 * reference count.
+ 	 */
+-	hb2 = hash_futex(&key2);
+ 
+ 	/* Check if the requeue code acquired the second futex for us. */
+ 	if (!q.rt_waiter) {
+@@ -3458,8 +3413,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		 * did a lock-steal - fix up the PI-state in that case.
+ 		 */
+ 		if (q.pi_state && (q.pi_state->owner != current)) {
+-			spin_lock(&hb2->lock);
+-			BUG_ON(&hb2->lock != q.lock_ptr);
++			spin_lock(q.lock_ptr);
+ 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+ 			/*
+ 			 * Drop the reference to the pi state which
+@@ -3485,8 +3439,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		pi_mutex = &q.pi_state->pi_mutex;
+ 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+ 
+-		spin_lock(&hb2->lock);
+-		BUG_ON(&hb2->lock != q.lock_ptr);
++		spin_lock(q.lock_ptr);
+ 		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ 			ret = 0;
+ 
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index a87b65447991..05fcf8a75a51 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -142,11 +142,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ 	WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+ 
+-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+-{
+-	return waiter && waiter != PI_WAKEUP_INPROGRESS;
+-}
+-
+ /*
+  * We can speed up the acquire/release, if there's no debugging state to be
+  * set up.
+@@ -420,8 +415,7 @@ int max_lock_depth = 1024;
+ 
+ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+ {
+-	return rt_mutex_real_waiter(p->pi_blocked_on) ?
+-		p->pi_blocked_on->lock : NULL;
++	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+ }
+ 
+ /*
+@@ -557,7 +551,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	 * reached or the state of the chain has changed while we
+ 	 * dropped the locks.
+ 	 */
+-	if (!rt_mutex_real_waiter(waiter))
++	if (!waiter)
+ 		goto out_unlock_pi;
+ 
+ 	/*
+@@ -1327,22 +1321,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 		return -EDEADLK;
+ 
+ 	raw_spin_lock(&task->pi_lock);
+-	/*
+-	 * In the case of futex requeue PI, this will be a proxy
+-	 * lock. The task will wake unaware that it is enqueueed on
+-	 * this lock. Avoid blocking on two locks and corrupting
+-	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+-	 * flag. futex_wait_requeue_pi() sets this when it wakes up
+-	 * before requeue (due to a signal or timeout). Do not enqueue
+-	 * the task if PI_WAKEUP_INPROGRESS is set.
+-	 */
+-	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+-		raw_spin_unlock(&task->pi_lock);
+-		return -EAGAIN;
+-	}
+-
+-	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+-
+ 	waiter->task = task;
+ 	waiter->lock = lock;
+ 	waiter->prio = task->prio;
+@@ -1366,7 +1344,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 		rt_mutex_enqueue_pi(owner, waiter);
+ 
+ 		rt_mutex_adjust_prio(owner);
+-		if (rt_mutex_real_waiter(owner->pi_blocked_on))
++		if (owner->pi_blocked_on)
+ 			chain_walk = 1;
+ 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+ 		chain_walk = 1;
+@@ -1466,7 +1444,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+ 	struct task_struct *owner = rt_mutex_owner(lock);
+-	struct rt_mutex *next_lock = NULL;
++	struct rt_mutex *next_lock;
+ 
+ 	lockdep_assert_held(&lock->wait_lock);
+ 
+@@ -1492,8 +1470,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ 	rt_mutex_adjust_prio(owner);
+ 
+ 	/* Store the lock on which owner is blocked or NULL */
+-	if (rt_mutex_real_waiter(owner->pi_blocked_on))
+-		next_lock = task_blocked_on_lock(owner);
++	next_lock = task_blocked_on_lock(owner);
+ 
+ 	raw_spin_unlock(&owner->pi_lock);
+ 
+@@ -1529,8 +1506,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+ 
+ 	waiter = task->pi_blocked_on;
+-	if (!rt_mutex_real_waiter(waiter) ||
+-	    rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
++	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 		return;
+ 	}
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 627340e7427e..f587e0422d23 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -132,8 +132,6 @@ enum rtmutex_chainwalk {
+ /*
+  * PI-futex support (proxy locking functions, etc.):
+  */
+-#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
+-
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ 				       struct task_struct *proxy_owner);