Diffstat
-rw-r--r-- | debian/patches-rt/0104-posix-timers-Thread-posix-cpu-timers-on-rt.patch | 269
1 file changed, 269 insertions, 0 deletions
diff --git a/debian/patches-rt/0104-posix-timers-Thread-posix-cpu-timers-on-rt.patch b/debian/patches-rt/0104-posix-timers-Thread-posix-cpu-timers-on-rt.patch
new file mode 100644
index 000000000..36a78b986
--- /dev/null
+++ b/debian/patches-rt/0104-posix-timers-Thread-posix-cpu-timers-on-rt.patch
@@ -0,0 +1,269 @@
+From 59ac8673b0cb63baa55b3058ba0cb36ecb14a9d7 Mon Sep 17 00:00:00 2001
+From: John Stultz <johnstul@us.ibm.com>
+Date: Fri, 3 Jul 2009 08:29:58 -0500
+Subject: [PATCH 104/347] posix-timers: Thread posix-cpu-timers on -rt
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+posix-cpu-timer code takes non -rt safe locks in hard irq
+context. Move it to a thread.
+
+[ 3.0 fixes from Peter Zijlstra <peterz@infradead.org> ]
+
+Signed-off-by: John Stultz <johnstul@us.ibm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h          |   3 +
+ init/init_task.c               |   7 ++
+ kernel/fork.c                  |   3 +
+ kernel/time/posix-cpu-timers.c | 154 ++++++++++++++++++++++++++++++++-
+ 4 files changed, 164 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 56047f57ebc7..5369eb1fd456 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -832,6 +832,9 @@ struct task_struct {
+ #ifdef CONFIG_POSIX_TIMERS
+ 	struct task_cputime cputime_expires;
+ 	struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct task_struct *posix_timer_list;
++#endif
+ #endif
+ 
+ 	/* Process credentials: */
+diff --git a/init/init_task.c b/init/init_task.c
+index d71054b95528..634becebd713 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = {
+ 	.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
+ };
+ 
++#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE)
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ /*
+  * Set up the first task table, touch at your own risk!. Base=0,
+  * limit=0x1fffff (=2MB)
+@@ -119,6 +125,7 @@ struct task_struct init_task
+ 	INIT_CPU_TIMERS(init_task)
+ 	.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
+ 	.timer_slack_ns = 50000, /* 50 usec default slack */
++	INIT_TIMER_LIST
+ 	.thread_pid = &init_struct_pid,
+ 	.thread_group = LIST_HEAD_INIT(init_task.thread_group),
+ 	.thread_node = LIST_HEAD_INIT(init_signals.thread_head),
+diff --git a/kernel/fork.c b/kernel/fork.c
+index a18d695259af..c27c196fd70e 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1587,6 +1587,9 @@ static void rt_mutex_init_task(struct task_struct *p)
+  */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	tsk->posix_timer_list = NULL;
++#endif
+ 	tsk->cputime_expires.prof_exp = 0;
+ 	tsk->cputime_expires.virt_exp = 0;
+ 	tsk->cputime_expires.sched_exp = 0;
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index bfaa44a80c03..2d29b269dc83 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -3,8 +3,10 @@
+  * Implement CPU time clocks for the POSIX clock interface.
+  */
+ 
++#include <uapi/linux/sched/types.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/cputime.h>
++#include <linux/sched/rt.h>
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
+@@ -15,6 +17,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/compat.h>
+ #include <linux/sched/deadline.h>
++#include <linux/smpboot.h>
+ 
+ #include "posix-timers.h"
+ 
+@@ -1140,14 +1143,12 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+  * already updated our counts. We need to check if any timers fire now.
+  * Interrupts are disabled.
+  */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ 	LIST_HEAD(firing);
+ 	struct k_itimer *timer, *next;
+ 	unsigned long flags;
+ 
+-	lockdep_assert_irqs_disabled();
+-
+ 	/*
+ 	 * The fast path checks that there are no expired thread or thread
+ 	 * group timers. If that's so, just return.
+@@ -1200,6 +1201,153 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ 	}
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++DEFINE_PER_CPU(bool, posix_timer_th_active);
++
++static void posix_cpu_kthread_fn(unsigned int cpu)
++{
++	struct task_struct *tsk = NULL;
++	struct task_struct *next = NULL;
++
++	BUG_ON(per_cpu(posix_timer_task, cpu) != current);
++
++	/* grab task list */
++	raw_local_irq_disable();
++	tsk = per_cpu(posix_timer_tasklist, cpu);
++	per_cpu(posix_timer_tasklist, cpu) = NULL;
++	raw_local_irq_enable();
++
++	/* its possible the list is empty, just return */
++	if (!tsk)
++		return;
++
++	/* Process task list */
++	while (1) {
++		/* save next */
++		next = tsk->posix_timer_list;
++
++		/* run the task timers, clear its ptr and
++		 * unreference it
++		 */
++		__run_posix_cpu_timers(tsk);
++		tsk->posix_timer_list = NULL;
++		put_task_struct(tsk);
++
++		/* check if this is the last on the list */
++		if (next == tsk)
++			break;
++		tsk = next;
++	}
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++	/* tsk == current, ensure it is safe to use ->signal/sighand */
++	if (unlikely(tsk->exit_state))
++		return 0;
++
++	if (!task_cputime_zero(&tsk->cputime_expires))
++		return 1;
++
++	if (!task_cputime_zero(&tsk->signal->cputime_expires))
++		return 1;
++
++	return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++	unsigned int cpu = smp_processor_id();
++	struct task_struct *tasklist;
++
++	BUG_ON(!irqs_disabled());
++
++	if (per_cpu(posix_timer_th_active, cpu) != true)
++		return;
++
++	/* get per-cpu references */
++	tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++	/* check to see if we're already queued */
++	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++		get_task_struct(tsk);
++		if (tasklist) {
++			tsk->posix_timer_list = tasklist;
++		} else {
++			/*
++			 * The list is terminated by a self-pointing
++			 * task_struct
++			 */
++			tsk->posix_timer_list = tsk;
++		}
++		per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++		wake_up_process(per_cpu(posix_timer_task, cpu));
++	}
++}
++
++static int posix_cpu_kthread_should_run(unsigned int cpu)
++{
++	return __this_cpu_read(posix_timer_tasklist) != NULL;
++}
++
++static void posix_cpu_kthread_park(unsigned int cpu)
++{
++	this_cpu_write(posix_timer_th_active, false);
++}
++
++static void posix_cpu_kthread_unpark(unsigned int cpu)
++{
++	this_cpu_write(posix_timer_th_active, true);
++}
++
++static void posix_cpu_kthread_setup(unsigned int cpu)
++{
++	struct sched_param sp;
++
++	sp.sched_priority = MAX_RT_PRIO - 1;
++	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++	posix_cpu_kthread_unpark(cpu);
++}
++
++static struct smp_hotplug_thread posix_cpu_thread = {
++	.store			= &posix_timer_task,
++	.thread_should_run	= posix_cpu_kthread_should_run,
++	.thread_fn		= posix_cpu_kthread_fn,
++	.thread_comm		= "posixcputmr/%u",
++	.setup			= posix_cpu_kthread_setup,
++	.park			= posix_cpu_kthread_park,
++	.unpark			= posix_cpu_kthread_unpark,
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++	/* Start one for boot CPU. */
++	unsigned long cpu;
++	int ret;
++
++	/* init the per-cpu posix_timer_tasklets */
++	for_each_possible_cpu(cpu)
++		per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++	ret = smpboot_register_percpu_thread(&posix_cpu_thread);
++	WARN_ON(ret);
++
++	return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++	lockdep_assert_irqs_disabled();
++	__run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+  * The tsk->sighand->siglock must be held by the caller.
+-- 
+2.36.1
+
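For readers of the patch above, the deferral scheme it introduces can be summarized as follows: the timer interrupt path, which on -rt must not take sleeping locks, only links the current task onto a per-CPU list and wakes a per-CPU SCHED_FIFO kthread (posixcputmr/N); the kthread detaches the whole list and calls __run_posix_cpu_timers() for each entry in thread context. The list is singly linked through tsk->posix_timer_list and terminated by a self-pointing entry, so a NULL pointer can keep meaning "not queued". The following is a minimal userspace sketch of that hand-off, for illustration only: it is not part of the patch and not kernel code, it substitutes pthread primitives for the kernel's wake_up_process()/smpboot machinery, and all names in it (struct task, queue_task, worker_main, run_timers) are hypothetical.

/*
 * Illustrative userspace sketch only, NOT kernel code and not part of the
 * patch above. It mimics the hand-off the patch introduces: a producer that
 * must not block pushes work onto a singly linked list terminated by a
 * self-pointing node (cf. tsk->posix_timer_list) and wakes a dedicated
 * worker thread (cf. posixcputmr/N), which detaches the whole list and runs
 * the expensive part in thread context.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct task {
	int id;
	struct task *next;	/* last entry points to itself */
};

static struct task *tasklist;	/* one of these per CPU in the real patch */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* Producer side, analogous to run_posix_cpu_timers() on -rt: queue and wake. */
static void queue_task(struct task *t)
{
	pthread_mutex_lock(&lock);
	t->next = tasklist ? tasklist : t;	/* empty list: terminate on self */
	tasklist = t;
	pthread_cond_signal(&kick);		/* analogous to wake_up_process() */
	pthread_mutex_unlock(&lock);
}

/* Stand-in for __run_posix_cpu_timers(): the work that may take slow locks. */
static void run_timers(struct task *t)
{
	printf("expiring timers for task %d\n", t->id);
}

/* Worker, analogous to posix_cpu_kthread_fn(): detach the list, then walk it. */
static void *worker_main(void *unused)
{
	(void)unused;
	for (;;) {
		struct task *t;

		pthread_mutex_lock(&lock);
		while (!tasklist)
			pthread_cond_wait(&kick, &lock);
		t = tasklist;
		tasklist = NULL;	/* detach the whole list at once */
		pthread_mutex_unlock(&lock);

		while (1) {
			struct task *next = t->next;
			bool last = (next == t);	/* self-pointing: end of list */

			run_timers(t);
			free(t);	/* cf. put_task_struct() */
			if (last)
				break;
			t = next;
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, worker_main, NULL);

	for (int i = 0; i < 3; i++) {
		struct task *t = malloc(sizeof(*t));

		t->id = i;
		queue_task(t);
	}
	sleep(1);	/* give the worker time to drain the list */
	return 0;
}

The self-pointing terminator is the detail the in-code comment ("The list is terminated by a self-pointing task_struct") refers to: it lets NULL mean "not queued" while the last queued element still carries a non-NULL link, so no extra flag or sentinel node is needed.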