Diffstat (limited to 'debian/patches-rt/0105-sched-Move-task_struct-cleanup-to-RCU.patch')
-rw-r--r--  debian/patches-rt/0105-sched-Move-task_struct-cleanup-to-RCU.patch  96
1 file changed, 96 insertions, 0 deletions
diff --git a/debian/patches-rt/0105-sched-Move-task_struct-cleanup-to-RCU.patch b/debian/patches-rt/0105-sched-Move-task_struct-cleanup-to-RCU.patch
new file mode 100644
index 000000000..8883673e8
--- /dev/null
+++ b/debian/patches-rt/0105-sched-Move-task_struct-cleanup-to-RCU.patch
@@ -0,0 +1,96 @@
+From 6b5806854133faa2eda014a6553665c6fa393d5e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 31 May 2011 16:59:16 +0200
+Subject: [PATCH 105/347] sched: Move task_struct cleanup to RCU
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+__put_task_struct() does quite a bit of expensive work. We don't want
+to burden random tasks with that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 3 +++
+ include/linux/sched/task.h | 11 ++++++++++-
+ kernel/fork.c | 15 ++++++++++++++-
+ 3 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5369eb1fd456..5e2ec9c86b4b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1196,6 +1196,9 @@ struct task_struct {
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++#endif
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+ #endif
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 91401309b1aa..5f8c1d53bc11 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -90,6 +90,15 @@ extern void sched_exec(void);
+
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -97,7 +106,7 @@ static inline void put_task_struct(struct task_struct *t)
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
+-
++#endif
+ struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+
+ #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+diff --git a/kernel/fork.c b/kernel/fork.c
+index c27c196fd70e..f5a4c42233f0 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -676,7 +676,9 @@ static inline void put_signal_struct(struct signal_struct *sig)
+ if (atomic_dec_and_test(&sig->sigcnt))
+ free_signal_struct(sig);
+ }
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
++static
++#endif
+ void __put_task_struct(struct task_struct *tsk)
+ {
+ WARN_ON(!tsk->exit_state);
+@@ -693,7 +695,18 @@ void __put_task_struct(struct task_struct *tsk)
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+
+ void __init __weak arch_task_cache_init(void) { }
+
+--
+2.36.1
+
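For context, the change above is one instance of a generic kernel pattern:
embed a struct rcu_head in a reference-counted object and let the final
put hand the expensive teardown to an RCU callback instead of running it
synchronously in whatever task happened to drop the last reference. A
minimal kernel-style sketch of that pattern follows; the names
(struct widget, put_widget) are hypothetical and not part of this patch.

#include <linux/atomic.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/rcupdate.h>	/* call_rcu(), struct rcu_head */
#include <linux/slab.h>		/* kfree() */

struct widget {
	atomic_t usage;			/* reference count */
	struct rcu_head put_rcu;	/* handle for the deferred free */
	/* ... payload ... */
};

/* The expensive part of the teardown. */
static void __put_widget(struct widget *w)
{
	kfree(w);
}

/* Runs after a grace period has elapsed, typically from softirq
 * context, so no reader that started before the last put can still
 * be dereferencing the object. */
static void __put_widget_cb(struct rcu_head *rhp)
{
	struct widget *w = container_of(rhp, struct widget, put_rcu);

	__put_widget(w);
}

/* Dropping the last reference only queues the callback; the caller
 * returns immediately instead of paying for the teardown itself. */
static inline void put_widget(struct widget *w)
{
	if (atomic_dec_and_test(&w->usage))
		call_rcu(&w->put_rcu, __put_widget_cb);
}

As in the patch, the put path shrinks to an atomic decrement plus a
call_rcu() enqueue; the real cleanup runs later, batched with other RCU
callbacks, rather than in the context of a random task.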