Diffstat (limited to 'debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch')
 -rw-r--r--  debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
index 9cabce468..84f1a2bad 100644
--- a/debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+++ b/debian/patches-rt/0177-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -1,9 +1,9 @@
-From 0a34a9993d2798ba232792f998e5ae5fe7519730 Mon Sep 17 00:00:00 2001
+From cdfb8372ad6e17318e854ebfec3fd9201fa5602e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 6 Oct 2020 13:07:17 +0200
Subject: [PATCH 177/323] locking/rtmutex: Use custom scheduling function for
spin-schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.215-rt107.tar.xz
PREEMPT_RT builds the rwsem, mutex, spinlock and rwlock typed locks on
top of a rtmutex lock. While blocked task->pi_blocked_on is set
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7 files changed, 32 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
-index e83f0982b99c..f1486b32502c 100644
+index e83f0982b99c1..f1486b32502c1 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -81,6 +81,9 @@ static inline bool should_resched(int preempt_offset)
@@ -54,7 +54,7 @@ index e83f0982b99c..f1486b32502c 100644
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index a334dd0d7c42..50e0c0ab7b97 100644
+index a334dd0d7c42c..50e0c0ab7b97b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -103,6 +103,9 @@ static __always_inline bool should_resched(int preempt_offset)
@@ -68,7 +68,7 @@ index a334dd0d7c42..50e0c0ab7b97 100644
# define __preempt_schedule() \
asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
-index b4d43a4af5f7..ac255e889462 100644
+index b4d43a4af5f79..ac255e8894629 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -79,6 +79,9 @@ static __always_inline bool should_resched(int preempt_offset)
@@ -82,7 +82,7 @@ index b4d43a4af5f7..ac255e889462 100644
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index e5af028c08b4..994c25640e15 100644
+index e5af028c08b49..994c25640e156 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
@@ -107,7 +107,7 @@ index e5af028c08b4..994c25640e15 100644
extern void normalize_rt_tasks(void);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index c095d1b92f70..2fe178651254 100644
+index c095d1b92f702..2fe1786512543 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1067,7 +1067,7 @@ void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
@@ -120,7 +120,7 @@ index c095d1b92f70..2fe178651254 100644
raw_spin_lock_irqsave(&lock->wait_lock, flags);
diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
-index 1ee16b8fedd7..16be7111aae7 100644
+index 1ee16b8fedd77..16be7111aae71 100644
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
@@ -211,7 +211,7 @@ static void __write_rt_lock(struct rt_rw_lock *lock)
@@ -133,7 +133,7 @@ index 1ee16b8fedd7..16be7111aae7 100644
raw_spin_lock_irqsave(&m->wait_lock, flags);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index be5d41ed6ff2..aaeed4b14278 100644
+index be5d41ed6ff21..aaeed4b14278a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5001,7 +5001,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
@@ -239,5 +239,5 @@ index be5d41ed6ff2..aaeed4b14278 100644
sched_preempt_enable_no_resched();
} while (need_resched());
--
-2.43.0
+2.44.0
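
Note: the refresh above touches only patch metadata (the upstream commit hash, the Origin tarball URL, the abbreviated blob hashes in the index lines, and the git version trailer); the patch body is unchanged. For reference, the custom scheduling function the underlying patch introduces is visible in the final hunk's context lines (the sched_preempt_enable_no_resched() / need_resched() loop). Below is a minimal sketch of its shape, reconstructed from that context and the patch title; it is not the verbatim patch body, and the second __schedule() argument is an assumption based on the 5.10-rt tree, where __schedule(bool preempt) gains a flag marking a spinning-lock schedule.

#ifdef CONFIG_PREEMPT_RT
/*
 * Sketch only, not the verbatim patch body: a custom scheduling
 * function for PREEMPT_RT's rtmutex-based spinlocks, used in place
 * of schedule() so that task->pi_blocked_on can remain set across
 * the context switch (it is owned by the lock slow path, not by
 * the scheduler).
 */
void __sched notrace preempt_schedule_lock(void)
{
	do {
		preempt_disable();
		/* Assumed 5.10-rt signature: preempt=true, spinning_lock=true */
		__schedule(true, true);
		sched_preempt_enable_no_resched();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule_lock);
#endif

The two lock slow paths touched above, rt_spin_lock_slowlock_locked() in kernel/locking/rtmutex.c and __write_rt_lock() in kernel/locking/rwlock-rt.c, presumably call this in place of a plain schedule() while the task is blocked, which is what the patch subject's "custom scheduling function for spin-schedule()" refers to.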