Diffstat (limited to 'debian/patches-rt/PREEMPT_AUTO.patch')
-rw-r--r--  debian/patches-rt/PREEMPT_AUTO.patch | 56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/debian/patches-rt/PREEMPT_AUTO.patch b/debian/patches-rt/PREEMPT_AUTO.patch
index 859dadc20c..323b18f798 100644
--- a/debian/patches-rt/PREEMPT_AUTO.patch
+++ b/debian/patches-rt/PREEMPT_AUTO.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Sat, 23 Sep 2023 03:11:05 +0200
 Subject: [PATCH] sched: define TIF_ALLOW_RESCHED
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.7/older/patches-6.7-rt6.tar.xz
 
 On Fri, Sep 22 2023 at 00:55, Thomas Gleixner wrote:
 > On Thu, Sep 21 2023 at 09:00, Linus Torvalds wrote:
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2050,17 +2050,17 @@ static inline void update_tsk_thread_fla
+@@ -2055,17 +2055,17 @@ static inline void update_tsk_thread_fla
  	update_ti_thread_flag(task_thread_info(tsk), flag, value);
  }
 
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	return test_ti_thread_flag(task_thread_info(tsk), flag);
  }
-@@ -2073,9 +2073,11 @@ static inline void set_tsk_need_resched(
+@@ -2078,9 +2078,11 @@ static inline void set_tsk_need_resched(
  static inline void clear_tsk_need_resched(struct task_struct *tsk)
  {
  	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
@@ -221,7 +221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
-@@ -2256,7 +2258,7 @@ static inline int rwlock_needbreak(rwloc
+@@ -2261,7 +2263,7 @@ static inline int rwlock_needbreak(rwloc
 
  static __always_inline bool need_resched(void)
  {
@@ -434,15 +434,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
  /*
 @@ -922,7 +923,7 @@ static bool set_nr_if_polling(struct tas
- 	for (;;) {
+ 	do {
  		if (!(val & _TIF_POLLING_NRFLAG))
  			return false;
 -		if (val & _TIF_NEED_RESCHED)
 +		if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
  			return true;
- 		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
- 			break;
-@@ -931,9 +932,9 @@ static bool set_nr_if_polling(struct tas
+ 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
+ 
+@@ -930,9 +931,9 @@ static bool set_nr_if_polling(struct tas
  	}
 
  #else
@@ -454,7 +454,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	return true;
  }
 
-@@ -1038,28 +1039,47 @@ void wake_up_q(struct wake_q_head *head)
+@@ -1037,28 +1038,47 @@ void wake_up_q(struct wake_q_head *head)
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
@@ -510,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  void resched_cpu(int cpu)
-@@ -1132,7 +1152,7 @@ static void wake_up_idle_cpu(int cpu)
+@@ -1131,7 +1151,7 @@ static void wake_up_idle_cpu(int cpu)
  	if (cpu == smp_processor_id())
  		return;
 
@@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  late_initcall(sched_init_debug);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -1016,8 +1016,10 @@ static void clear_buddies(struct cfs_rq
+@@ -1001,8 +1001,10 @@ static void clear_buddies(struct cfs_rq
  * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
  * this is probably good enough.
  */
@@ -568,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	if ((s64)(se->vruntime - se->deadline) < 0)
  		return;
 
-@@ -1036,10 +1038,19 @@ static void update_deadline(struct cfs_r
+@@ -1021,10 +1023,19 @@ static void update_deadline(struct cfs_r
  	/*
  	 * The task has consumed its request, reschedule.
  	 */
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  #include "pelt.h"
-@@ -1147,7 +1158,7 @@ static void update_tg_load_avg(struct cf
+@@ -1132,7 +1143,7 @@ static void update_tg_load_avg(struct cf
  /*
   * Update the current task's runtime statistics.
   */
@@ -600,7 +600,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	struct sched_entity *curr = cfs_rq->curr;
  	u64 now = rq_clock_task(rq_of(cfs_rq));
-@@ -1174,7 +1185,7 @@ static void update_curr(struct cfs_rq *c
+@@ -1159,7 +1170,7 @@ static void update_curr(struct cfs_rq *c
  	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
  	curr->vruntime += calc_delta_fair(delta_exec, curr);
@@ -609,7 +609,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  	update_min_vruntime(cfs_rq);
 
  	if (entity_is_task(curr)) {
-@@ -1188,6 +1199,11 @@ static void update_curr(struct cfs_rq *c
+@@ -1173,6 +1184,11 @@ static void update_curr(struct cfs_rq *c
  	account_cfs_rq_runtime(cfs_rq, delta_exec);
  }
 
@@ -621,7 +621,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  static void update_curr_fair(struct rq *rq)
  {
  	update_curr(cfs_rq_of(&rq->curr->se));
-@@ -5398,7 +5414,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5449,7 +5465,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	/*
  	 * Update run-time statistics of the 'current'.
  	 */
@@ -630,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
  	/*
  	 * Ensure that runnable average is periodically updated.
-@@ -5412,7 +5428,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5463,7 +5479,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
@@ -639,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		return;
  	}
  	/*
-@@ -5558,7 +5574,7 @@ static void __account_cfs_rq_runtime(str
+@@ -5609,7 +5625,7 @@ static void __account_cfs_rq_runtime(str
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -648,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  static __always_inline
-@@ -5818,7 +5834,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
+@@ -5869,7 +5885,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
 
  	/* Determine whether we need to wake up potentially idle CPU: */
  	if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -657,7 +657,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  #ifdef CONFIG_SMP
-@@ -6523,7 +6539,7 @@ static void hrtick_start_fair(struct rq
+@@ -6584,7 +6600,7 @@ static void hrtick_start_fair(struct rq
 
  	if (delta < 0) {
  		if (task_current(rq, p))
@@ -666,7 +666,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		return;
  	}
  	hrtick_start(rq, delta);
-@@ -8175,7 +8191,7 @@ static void check_preempt_wakeup(struct
+@@ -8240,7 +8256,7 @@ static void check_preempt_wakeup_fair(st
  	 * prevents us from potentially nominating it as a false LAST_BUDDY
  	 * below.
  	 */
@@ -675,7 +675,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		return;
 
  	/* Idle tasks are by definition preempted by non-idle tasks. */
-@@ -8217,7 +8233,7 @@ static void check_preempt_wakeup(struct
+@@ -8282,7 +8298,7 @@ static void check_preempt_wakeup_fair(st
  		return;
 
  preempt:
@@ -684,7 +684,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  #ifdef CONFIG_SMP
-@@ -12374,7 +12390,7 @@ static inline void task_tick_core(struct
+@@ -12449,7 +12465,7 @@ static inline void task_tick_core(struct
  	 */
  	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
  	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
@@ -693,18 +693,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  /*
-@@ -12539,7 +12555,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -12614,7 +12630,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (task_current(rq, p)) {
  		if (p->prio > oldprio)
 -			resched_curr(rq);
 +			resched_curr_lazy(rq);
  	} else
- 		check_preempt_curr(rq, p, 0);
+ 		wakeup_preempt(rq, p, 0);
  }
 --- a/kernel/sched/features.h
 +++ b/kernel/sched/features.h
-@@ -89,3 +89,5 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
+@@ -88,3 +88,5 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
  SCHED_FEAT(LATENCY_WARN, false)
 
  SCHED_FEAT(HZ_BW, true)
@@ -724,7 +724,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -2435,6 +2435,7 @@ extern void init_sched_fair_class(void);
+@@ -2419,6 +2419,7 @@ extern void init_sched_fair_class(void);
  extern void reweight_task(struct task_struct *p, int prio);
 
  extern void resched_curr(struct rq *rq);
@@ -734,7 +734,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -2722,6 +2722,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2723,6 +2723,8 @@ unsigned int tracing_gen_ctx_irq_test(un
 
  	if (tif_need_resched())
  		trace_flags |= TRACE_FLAG_NEED_RESCHED;
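Note on the non-mechanical part of this refresh: most hunks above only renumber the inner patch's hunk headers for the 6.7 code base and track upstream renames (check_preempt_wakeup() -> check_preempt_wakeup_fair(), check_preempt_curr() -> wakeup_preempt()). The one structural context change is in set_nr_if_polling(), which upstream rewrote from a for (;;) loop into a do/while around try_cmpxchg(). The following is a minimal, stand-alone C sketch of that cmpxchg loop as it reads with the PREEMPT_AUTO check applied; it is a user-space model for illustration only, not kernel code, and the flag values, the resched_pending return convention and the function name are assumptions taken from the hunk context above.

/* Stand-alone model of the do/while try_cmpxchg() pattern from the
 * refreshed set_nr_if_polling() hunk; flag values are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED       (1u << 0)  /* immediate resched request */
#define TIF_NEED_RESCHED_LAZY  (1u << 1)  /* lazy resched request (PREEMPT_AUTO) */
#define TIF_POLLING_NRFLAG     (1u << 2)  /* target CPU is polling on its flags */

/* Set NEED_RESCHED only while the target is still polling; return true if
 * the caller may skip the IPI, false if it must send one. */
static bool set_nr_if_polling(atomic_uint *flags)
{
	unsigned int val = atomic_load(flags);

	do {
		if (!(val & TIF_POLLING_NRFLAG))
			return false;               /* not polling: caller must send an IPI */
		if (val & (TIF_NEED_RESCHED | TIF_NEED_RESCHED_LAZY))
			return true;                /* a resched request is already pending */
		/* weak CAS is fine here: on failure val is reloaded and we retry */
	} while (!atomic_compare_exchange_weak(flags, &val, val | TIF_NEED_RESCHED));

	return true;
}

int main(void)
{
	atomic_uint flags = TIF_POLLING_NRFLAG;

	printf("polling CPU, skip IPI: %d\n", set_nr_if_polling(&flags));
	printf("flags now: %#x\n", (unsigned int)atomic_load(&flags));
	return 0;
}

As in the patched hunk, an already-pending immediate or lazy request counts as success, and the compare-exchange retries only if the flags word changed underneath the loop.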