author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:38 +0000
commit  08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree    aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
parent  Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (debian/4.19.249-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch')
-rw-r--r--  debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch  148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
new file mode 100644
index 000000000..e250e2aa4
--- /dev/null
+++ b/debian/patches-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -0,0 +1,148 @@
+From e19a4689e9aa66751202caf5aeade89ecb118f2c Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 7 Jan 2019 13:52:31 +0100
+Subject: [PATCH 011/347] sched/fair: Robustify CFS-bandwidth timer locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Traditionally hrtimer callbacks were run with IRQs disabled, but with
+the introduction of HRTIMER_MODE_SOFT it is possible they run from
+SoftIRQ context, which does _NOT_ have IRQs disabled.
+
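+For illustration only (a minimal sketch, not part of this patch; the
+timer and callback names are hypothetical), the two flavours differ in
+where the callback runs:
+
+  /* Expires from SoftIRQ context, so IRQs may be enabled here: */
+  static enum hrtimer_restart my_soft_cb(struct hrtimer *t)
+  {
+          return HRTIMER_NORESTART;
+  }
+
+  hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+  my_timer.function = my_soft_cb;
+  hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
+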
+Allow for the CFS bandwidth timers (period_timer and slack_timer) to
+be run from SoftIRQ context; this entails removing the assumption that
+IRQs are already disabled from the locking.
+
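+As an illustrative sketch of the resulting pattern (cfs_b->lock is the
+real lock, the surrounding code is elided; see the diff below for the
+actual sites):
+
+  unsigned long flags;
+
+  /*
+   * The caller may have IRQs enabled (SoftIRQ) or disabled, so save
+   * and disable them explicitly instead of assuming they are off:
+   */
+  raw_spin_lock_irqsave(&cfs_b->lock, flags);
+  /* ... refresh the period / distribute runtime ... */
+  raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+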
+While mainline doesn't strictly need this, -RT forces all timers not
+explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
+And marking these timers as MODE_HARD doesn't make sense as they're
+not required for RT operation and can potentially be quite expensive.
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Tom Putzeys <tom.putzeys@be.atlascopco.com>
+Tested-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/fair.c | 30 ++++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 84e7efda98da..e84a056f783f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4597,7 +4597,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+ struct rq *rq = rq_of(cfs_rq);
+ struct rq_flags rf;
+
+- rq_lock(rq, &rf);
++ rq_lock_irqsave(rq, &rf);
+ if (!cfs_rq_throttled(cfs_rq))
+ goto next;
+
+@@ -4616,7 +4616,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+ unthrottle_cfs_rq(cfs_rq);
+
+ next:
+- rq_unlock(rq, &rf);
++ rq_unlock_irqrestore(rq, &rf);
+
+ if (!remaining)
+ break;
+@@ -4632,7 +4632,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+ * period the timer is deactivated until scheduling resumes; cfs_b->idle is
+ * used to track this state.
+ */
+-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
++static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
+ {
+ u64 runtime;
+ int throttled;
+@@ -4672,10 +4672,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
+ runtime = cfs_b->runtime;
+ cfs_b->distribute_running = 1;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ /* we can't nest cfs_b->lock while distributing bandwidth */
+ runtime = distribute_cfs_runtime(cfs_b, runtime);
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+
+ cfs_b->distribute_running = 0;
+ throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+@@ -4783,16 +4783,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ {
+ u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
++ unsigned long flags;
+
+ /* confirm we're still not at a refresh boundary */
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ if (cfs_b->distribute_running) {
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ return;
+ }
+
+ if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ return;
+ }
+
+@@ -4802,17 +4803,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+
+ if (!runtime)
+ return;
+
+ runtime = distribute_cfs_runtime(cfs_b, runtime);
+
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ }
+
+ /*
+@@ -4892,11 +4893,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+ {
+ struct cfs_bandwidth *cfs_b =
+ container_of(timer, struct cfs_bandwidth, period_timer);
++ unsigned long flags;
+ int overrun;
+ int idle = 0;
+ int count = 0;
+
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ for (;;) {
+ overrun = hrtimer_forward_now(timer, cfs_b->period);
+ if (!overrun)
+@@ -4932,11 +4934,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+ count = 0;
+ }
+
+- idle = do_sched_cfs_period_timer(cfs_b, overrun);
++ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
+ }
+ if (idle)
+ cfs_b->period_active = 0;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+
+ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+ }
+--
+2.36.1
+