From 08b74a000942a380fe028845f92cd3a0dee827d5 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 6 May 2024 03:02:38 +0200
Subject: Adding debian version 4.19.249-2.

Signed-off-by: Daniel Baumann
---
 ...round-migrate_disable-enable-in-different.patch | 70 ++++++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch

diff --git a/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch
new file mode 100644
index 000000000..373f02a64
--- /dev/null
+++ b/debian/patches-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch
@@ -0,0 +1,70 @@
+From d142001fdbdd8cc5fb558e9c3fa110f575875355 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Wed, 8 Mar 2017 14:23:35 +0100
+Subject: [PATCH 060/347] futex: workaround migrate_disable/enable in different
+ context
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+migrate_disable()/migrate_enable() take a different path in atomic() vs
+!atomic() context. These little hacks ensure that the migrate code counts
+don't underflow / overflow while we lock the hb lock with interrupts
+enabled and unlock it with interrupts disabled.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ kernel/futex.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 3c67da9b8408..fac994367189 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2998,6 +2998,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ 	 * before __rt_mutex_start_proxy_lock() is done.
+ 	 */
+ 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
++	/*
++	 * The migrate_disable() here disables migration in the in_atomic() fast
++	 * path which is enabled again in the following spin_unlock(). We have
++	 * one migrate_disable() pending in the slow-path which is reversed
++	 * after the raw_spin_unlock_irq() where we leave the atomic context.
++	 */
++	migrate_disable();
++
+ 	spin_unlock(q.lock_ptr);
+ 	/*
+ 	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+@@ -3006,6 +3014,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ 	 */
+ 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+ 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
++	migrate_enable();
+ 
+ 	if (ret) {
+ 		if (ret == 1)
+@@ -3140,11 +3149,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 		 * rt_waiter. Also see the WARN in wake_futex_pi().
+ 		 */
+ 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++		/*
++		 * Magic trickery for now to make the RT migrate disable
++		 * logic happy. The following spin_unlock() happens with
++		 * interrupts disabled so the internal migrate_enable()
++		 * won't undo the migrate_disable() which was issued when
++		 * locking hb->lock.
++		 */
++		migrate_disable();
+ 		spin_unlock(&hb->lock);
+ 
+ 		/* drops pi_state->pi_mutex.wait_lock */
+ 		ret = wake_futex_pi(uaddr, uval, pi_state);
+ 
++		migrate_enable();
++
+ 		put_pi_state(pi_state);
+ 
+ 		/*
+-- 
+2.36.1
+