author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:37 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:37 +0000
commit | 85f603d4fd6d85c425502723a17daa94574977de (patch)
tree | 188a21432c3b8e8ddb8a08e9a09397164a88181c /debian/patches-rt/PREEMPT_AUTO.patch
parent | Merging upstream version 6.9.7. (diff)
download | linux-85f603d4fd6d85c425502723a17daa94574977de.tar.xz linux-85f603d4fd6d85c425502723a17daa94574977de.zip
Adding debian version 6.9.7-1. (debian/6.9.7-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/PREEMPT_AUTO.patch')
-rw-r--r-- | debian/patches-rt/PREEMPT_AUTO.patch | 54
1 file changed, 27 insertions, 27 deletions
diff --git a/debian/patches-rt/PREEMPT_AUTO.patch b/debian/patches-rt/PREEMPT_AUTO.patch
index 338abae7c3..f9098eb616 100644
--- a/debian/patches-rt/PREEMPT_AUTO.patch
+++ b/debian/patches-rt/PREEMPT_AUTO.patch
@@ -1,7 +1,7 @@
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Sat, 23 Sep 2023 03:11:05 +0200
 Subject: [PATCH] sched: define TIF_ALLOW_RESCHED
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.8/older/patches-6.8.2-rt11.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.9/older/patches-6.9-rt5.tar.xz
 On Fri, Sep 22 2023 at 00:55, Thomas Gleixner wrote:
 > On Thu, Sep 21 2023 at 09:00, Linus Torvalds wrote:
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -277,6 +277,7 @@ config X86
+@@ -279,6 +279,7 @@ config X86
 select HAVE_STATIC_CALL
 select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
 select HAVE_PREEMPT_DYNAMIC_CALL
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 select HAVE_SYSCALL_TRACEPOINTS
 --- a/arch/x86/include/asm/thread_info.h
 +++ b/arch/x86/include/asm/thread_info.h
-@@ -81,8 +81,9 @@ struct thread_info {
+@@ -87,8 +87,9 @@ struct thread_info {
 #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
 #define TIF_SIGPENDING 2 /* signal pending */
 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
 #define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
 #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
-@@ -104,6 +105,7 @@ struct thread_info {
+@@ -110,6 +111,7 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1934,17 +1934,17 @@ static inline void update_tsk_thread_fla
+@@ -1938,17 +1938,17 @@ static inline void update_tsk_thread_fla
 update_ti_thread_flag(task_thread_info(tsk), flag, value);
 }
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 return test_ti_thread_flag(task_thread_info(tsk), flag);
 }
-@@ -1957,9 +1957,11 @@ static inline void set_tsk_need_resched(
+@@ -1961,9 +1961,11 @@ static inline void set_tsk_need_resched(
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
 {
 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
@@ -221,7 +221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
-@@ -2100,7 +2102,7 @@ static inline bool preempt_model_preempt
+@@ -2104,7 +2106,7 @@ static inline bool preempt_model_preempt
 static __always_inline bool need_resched(void)
 {
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
 --- a/include/linux/trace_events.h
 +++ b/include/linux/trace_events.h
-@@ -178,8 +178,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -184,8 +184,8 @@ unsigned int tracing_gen_ctx_irq_test(un
 enum trace_flag_type {
 TRACE_FLAG_IRQS_OFF = 0x01,
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 TRACE_FLAG_HARDIRQ = 0x08,
 TRACE_FLAG_SOFTIRQ = 0x10,
 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-@@ -205,11 +205,11 @@ static inline unsigned int tracing_gen_c
+@@ -211,11 +211,11 @@ static inline unsigned int tracing_gen_c
 static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
 {
@@ -383,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 default y if HAVE_PREEMPT_DYNAMIC_CALL
 --- a/kernel/entry/common.c
 +++ b/kernel/entry/common.c
-@@ -92,7 +92,7 @@ void __weak arch_do_signal_or_restart(st
+@@ -98,7 +98,7 @@ void __weak arch_do_signal_or_restart(st
 local_irq_enable_exit_to_user(ti_work);
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 schedule();
 if (ti_work & _TIF_UPROBE)
-@@ -301,7 +301,7 @@ void raw_irqentry_exit_cond_resched(void
+@@ -307,7 +307,7 @@ void raw_irqentry_exit_cond_resched(void
 rcu_irq_exit_check_preempt();
 if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 WARN_ON_ONCE(!on_thread_stack());
@@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 late_initcall(sched_init_debug);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -975,8 +975,10 @@ static void clear_buddies(struct cfs_rq
+@@ -981,8 +981,10 @@ static void clear_buddies(struct cfs_rq
 * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
 * this is probably good enough.
 */
@@ -568,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 if ((s64)(se->vruntime - se->deadline) < 0)
 return;
-@@ -995,10 +997,19 @@ static void update_deadline(struct cfs_r
+@@ -1001,10 +1003,19 @@ static void update_deadline(struct cfs_r
 /*
 * The task has consumed its request, reschedule.
 */
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 #include "pelt.h"
-@@ -1153,7 +1164,7 @@ s64 update_curr_common(struct rq *rq)
+@@ -1159,7 +1170,7 @@ s64 update_curr_common(struct rq *rq)
 /*
 * Update the current task's runtime statistics.
 */
@@ -600,7 +600,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 struct sched_entity *curr = cfs_rq->curr;
 s64 delta_exec;
-@@ -1166,7 +1177,7 @@ static void update_curr(struct cfs_rq *c
+@@ -1172,7 +1183,7 @@ static void update_curr(struct cfs_rq *c
 return;
 curr->vruntime += calc_delta_fair(delta_exec, curr);
@@ -609,7 +609,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 update_min_vruntime(cfs_rq);
 if (entity_is_task(curr))
-@@ -1175,6 +1186,11 @@ static void update_curr(struct cfs_rq *c
+@@ -1181,6 +1192,11 @@ static void update_curr(struct cfs_rq *c
 account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
@@ -621,7 +621,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 static void update_curr_fair(struct rq *rq)
 {
 update_curr(cfs_rq_of(&rq->curr->se));
-@@ -5493,7 +5509,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -5505,7 +5521,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
 /*
 * Update run-time statistics of the 'current'.
 */
@@ -630,7 +630,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 /*
 * Ensure that runnable average is periodically updated.
-@@ -5507,7 +5523,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
 * validating it and just reschedule.
 */
 if (queued) {
@@ -639,7 +639,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 return;
 }
 /*
-@@ -5653,7 +5669,7 @@ static void __account_cfs_rq_runtime(str
 * hierarchy can be throttled
 */
 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -648,7 +648,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 static __always_inline
-@@ -5913,7 +5929,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
 /* Determine whether we need to wake up potentially idle CPU: */
 if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -657,7 +657,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 #ifdef CONFIG_SMP
-@@ -6628,7 +6644,7 @@ static void hrtick_start_fair(struct rq
 if (delta < 0) {
 if (task_current(rq, p))
@@ -666,7 +666,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 return;
 }
 hrtick_start(rq, delta);
-@@ -8304,7 +8320,7 @@ static void check_preempt_wakeup_fair(st
 * prevents us from potentially nominating it as a false LAST_BUDDY
 * below.
 */
@@ -675,7 +675,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 return;
 /* Idle tasks are by definition preempted by non-idle tasks. */
-@@ -8346,7 +8362,7 @@ static void check_preempt_wakeup_fair(st
 return;
 preempt:
@@ -684,7 +684,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 #ifdef CONFIG_SMP
-@@ -12516,7 +12532,7 @@ static inline void task_tick_core(struct
 */
 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
@@ -693,7 +693,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 /*
-@@ -12681,7 +12697,7 @@ prio_changed_fair(struct rq *rq, struct
 */
 if (task_current(rq, p)) {
 if (p->prio > oldprio)
@@ -724,7 +724,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -2463,6 +2463,7 @@ extern void init_sched_fair_class(void);
 extern void reweight_task(struct task_struct *p, int prio);
 extern void resched_curr(struct rq *rq);
@@ -734,7 +734,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 extern struct rt_bandwidth def_rt_bandwidth;
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -2717,6 +2717,8 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2513,6 +2513,8 @@ unsigned int tracing_gen_ctx_irq_test(un
 if (tif_need_resched())
 trace_flags |= TRACE_FLAG_NEED_RESCHED;
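For orientation only (not part of the commit): the PREEMPT_AUTO patch being re-based here splits the reschedule request into an immediate flag (TIF_NEED_RESCHED, tested via tif_need_resched(NR_now)) and a lazy variant set by helpers such as resched_curr_lazy(), with need_resched() reporting only the immediate flag. The standalone C sketch below models that two-level idea under stated assumptions: struct task, resched_now(), resched_lazy(), scheduler_tick() and the TIF_NEED_RESCHED_LAZY bit are simplified stand-ins invented for the example, and the "tick promotes lazy to immediate" rule is a broad simplification of the design, not a quote of the patch's actual logic.

```c
/*
 * Standalone illustration (userspace C, not kernel code) of a two-level
 * reschedule flag: an immediate "now" bit that need_resched() reports,
 * and a "lazy" bit that a later tick promotes to "now" if the task is
 * still running.  Names are simplified for the example.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	TIF_NEED_RESCHED      = 1u << 0,  /* preempt at the next preemption point */
	TIF_NEED_RESCHED_LAZY = 1u << 1,  /* illustrative stand-in for the lazy bit */
};

struct task {
	unsigned int flags;
};

/* Urgent request: set the immediate bit directly. */
static void resched_now(struct task *t)
{
	t->flags |= TIF_NEED_RESCHED;
}

/* Lazy request: let the task run on; escalation happens later. */
static void resched_lazy(struct task *t)
{
	t->flags |= TIF_NEED_RESCHED_LAZY;
}

/* Only the immediate bit makes need_resched() true. */
static bool need_resched(const struct task *t)
{
	return t->flags & TIF_NEED_RESCHED;
}

/* Tick handler: a still-pending lazy request becomes an immediate one. */
static void scheduler_tick(struct task *t)
{
	if (t->flags & TIF_NEED_RESCHED_LAZY) {
		t->flags &= ~TIF_NEED_RESCHED_LAZY;
		t->flags |= TIF_NEED_RESCHED;
	}
}

int main(void)
{
	struct task t = { 0 };

	resched_lazy(&t);
	printf("after lazy request: need_resched=%d\n", need_resched(&t)); /* 0 */

	scheduler_tick(&t);
	printf("after next tick:    need_resched=%d\n", need_resched(&t)); /* 1 */

	resched_now(&t); /* a wakeup needing immediate preemption sets the bit directly */
	return 0;
}
```

The hunks above only shift line offsets for the 6.9 re-base; the sketch is meant to make the recurring identifiers (need_resched, resched_curr, resched_curr_lazy, TIF_NEED_RESCHED) easier to place, nothing more.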