From b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 27 Apr 2024 12:06:00 +0200
Subject: Adding debian version 5.10.209-2.

Signed-off-by: Daniel Baumann
---
 .../patches-rt/0194-rt-Add-local-irq-locks.patch   | 182 +++++++++++++++++++++
 1 file changed, 182 insertions(+)
 create mode 100644 debian/patches-rt/0194-rt-Add-local-irq-locks.patch

diff --git a/debian/patches-rt/0194-rt-Add-local-irq-locks.patch b/debian/patches-rt/0194-rt-Add-local-irq-locks.patch
new file mode 100644
index 000000000..53b3fd552
--- /dev/null
+++ b/debian/patches-rt/0194-rt-Add-local-irq-locks.patch
@@ -0,0 +1,182 @@
+From 0d3e0b0db513aac6e4cd2e2d084b7a76f4d28c44 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Mon, 20 Jun 2011 09:03:47 +0200
+Subject: [PATCH 194/323] rt: Add local irq locks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Introduce locallock. For !RT this maps to preempt_disable()/
+local_irq_disable(), so not much changes. For RT this maps to a
+spinlock. This makes preemption possible, and the locked "resource"
+gets the lockdep annotation it wouldn't have otherwise. The locks are
+recursive for owner == current. Also, all lock users call
+migrate_disable(), which ensures that the task is not migrated to
+another CPU while the lock is held and the owner is preempted.
+
+Signed-off-by: Thomas Gleixner
+---
+ include/linux/local_lock_internal.h | 111 ++++++++++++++++++++++++++--
+ 1 file changed, 103 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
+index 3f02b818625e..1b8ae034946f 100644
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -7,13 +7,39 @@
+ #include <linux/lockdep.h>
+ 
+ typedef struct {
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
++#ifdef CONFIG_PREEMPT_RT
++	spinlock_t		lock;
++	struct task_struct	*owner;
++	int			nestcnt;
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+ 	struct lockdep_map	dep_map;
+ 	struct task_struct	*owner;
+ #endif
+ } local_lock_t;
+ 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
++#ifdef CONFIG_PREEMPT_RT
++
++#define INIT_LOCAL_LOCK(lockname)	{	\
++	__SPIN_LOCK_UNLOCKED((lockname).lock),	\
++	.owner		= NULL,			\
++	.nestcnt	= 0,			\
++	}
++
++static inline void ___local_lock_init(local_lock_t *l)
++{
++	l->owner = NULL;
++	l->nestcnt = 0;
++}
++
++#define __local_lock_init(l)			\
++do {						\
++	spin_lock_init(&(l)->lock);		\
++	___local_lock_init(l);			\
++} while (0)
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
++
+ # define LOCAL_LOCK_DEBUG_INIT(lockname)	\
+ 	.dep_map = {				\
+ 		.name = #lockname,		\
+@@ -21,7 +47,33 @@ typedef struct {
+ 		.lock_type = LD_LOCK_PERCPU,	\
+ 	},					\
+ 	.owner = NULL,
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
+ 
++static inline void local_lock_acquire(local_lock_t *l)
++{
++	if (l->owner != current) {
++		spin_lock(&l->lock);
++		DEBUG_LOCKS_WARN_ON(l->owner);
++		DEBUG_LOCKS_WARN_ON(l->nestcnt);
++		l->owner = current;
++	}
++	l->nestcnt++;
++}
++
++static inline void local_lock_release(local_lock_t *l)
++{
++	DEBUG_LOCKS_WARN_ON(l->nestcnt == 0);
++	DEBUG_LOCKS_WARN_ON(l->owner != current);
++	if (--l->nestcnt)
++		return;
++
++	l->owner = NULL;
++	spin_unlock(&l->lock);
++}
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+ static inline void local_lock_acquire(local_lock_t *l)
+ {
+ 	lock_map_acquire(&l->dep_map);
+@@ -47,6 +99,47 @@ static inline void local_lock_release(local_lock_t *l) { }
+ static inline void local_lock_debug_init(local_lock_t *l) { }
+ #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+ 
++#ifdef CONFIG_PREEMPT_RT
++
++#define __local_lock(lock)				\
++	do {						\
++		migrate_disable();			\
++		local_lock_acquire(this_cpu_ptr(lock));	\
++	} while (0)
++
++#define __local_unlock(lock)				\
++	do {						\
++		local_lock_release(this_cpu_ptr(lock));	\
++		migrate_enable();			\
++	} while (0)
++
++#define __local_lock_irq(lock)				\
++	do {						\
++		migrate_disable();			\
++		local_lock_acquire(this_cpu_ptr(lock));	\
++	} while (0)
++
++#define __local_lock_irqsave(lock, flags)		\
++	do {						\
++		migrate_disable();			\
++		flags = 0;				\
++		local_lock_acquire(this_cpu_ptr(lock));	\
++	} while (0)
++
++#define __local_unlock_irq(lock)			\
++	do {						\
++		local_lock_release(this_cpu_ptr(lock));	\
++		migrate_enable();			\
++	} while (0)
++
++#define __local_unlock_irqrestore(lock, flags)		\
++	do {						\
++		local_lock_release(this_cpu_ptr(lock));	\
++		migrate_enable();			\
++	} while (0)
++
++#else
++
+ #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
+ 
+ #define __local_lock_init(lock)				\
+@@ -66,6 +159,12 @@ do {						\
+ 	local_lock_acquire(this_cpu_ptr(lock));		\
+ } while (0)
+ 
++#define __local_unlock(lock)				\
++	do {						\
++		local_lock_release(this_cpu_ptr(lock));	\
++		preempt_enable();			\
++	} while (0)
++
+ #define __local_lock_irq(lock)				\
+ 	do {						\
+ 		local_irq_disable();			\
+@@ -78,12 +177,6 @@ do {						\
+ 	local_lock_acquire(this_cpu_ptr(lock));		\
+ } while (0)
+ 
+-#define __local_unlock(lock)				\
+-	do {						\
+-		local_lock_release(this_cpu_ptr(lock));	\
+-		preempt_enable();			\
+-	} while (0)
+-
+ #define __local_unlock_irq(lock)			\
+ 	do {						\
+ 		local_lock_release(this_cpu_ptr(lock));	\
+@@ -95,3 +188,5 @@ do {						\
+ 	local_lock_release(this_cpu_ptr(lock));		\
+ 	local_irq_restore(flags);			\
+ } while (0)
++
++#endif
+-- 
+2.43.0
+
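For context, here is a minimal usage sketch of the local_lock API that the
patch above reworks for PREEMPT_RT. It follows the per-CPU pattern from
Documentation/locking/locktypes.rst; the cpu_stats structure, its fields,
and count_event() are illustrative names, not taken from the patch.

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct cpu_stats {
	local_lock_t	lock;	/* protects this CPU's instance */
	unsigned long	events;
};

static DEFINE_PER_CPU(struct cpu_stats, cpu_stats) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void count_event(void)
{
	/*
	 * !RT: local_lock() disables preemption, which both pins the
	 * task to this CPU and serializes access to the per-CPU data.
	 * RT (with this patch): it calls migrate_disable() and takes
	 * the per-CPU spinlock, so the critical section stays
	 * preemptible while the task is still kept from migrating.
	 */
	local_lock(&cpu_stats.lock);
	this_cpu_inc(cpu_stats.events);
	local_unlock(&cpu_stats.lock);
}

Because on RT the serialization comes from the lock rather than from
disabling preemption, the critical section no longer has to be atomic,
and lockdep can track the lock like any other.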