Diffstat (limited to 'debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch')
-rw-r--r--  debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch
index 12d064860..fc04c5d86 100644
--- a/debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch
+++ b/debian/patches-rt/0258-sched-Add-support-for-lazy-preemption.patch
@@ -1,8 +1,8 @@
-From e8b4a64bf2eb6ff6330544895c0f69696059ffda Mon Sep 17 00:00:00 2001
+From ed95947ad84cd7475aee1d39d1070299af9e0617 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
Subject: [PATCH 258/323] sched: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.215-rt107.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
12 files changed, 254 insertions(+), 36 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index fb140e00f74d..af39859f02ee 100644
+index fb140e00f74dc..af39859f02ee1 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -174,6 +174,20 @@ extern void preempt_count_sub(int val);
@@ -176,7 +176,7 @@ index fb140e00f74d..af39859f02ee 100644
#endif /* CONFIG_SMP */
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index bd0c9c633438..665a17e4f69b 100644
+index bd0c9c633438a..665a17e4f69be 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1882,6 +1882,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
@@ -225,7 +225,7 @@ index bd0c9c633438..665a17e4f69b 100644
{
if (task->state & (__TASK_STOPPED | __TASK_TRACED))
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index f3040b0b4b23..3cb02ced141b 100644
+index f3040b0b4b235..3cb02ced141b8 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -110,7 +110,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
@@ -248,7 +248,7 @@ index f3040b0b4b23..3cb02ced141b 100644
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
-index 2151524a10f0..e7afd9fe35e5 100644
+index 2151524a10f04..e7afd9fe35e5f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -70,6 +70,7 @@ struct trace_entry {
@@ -280,7 +280,7 @@ index 2151524a10f0..e7afd9fe35e5 100644
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index cbe3aa495519..b5cd1e278eb5 100644
+index cbe3aa495519b..b5cd1e278eb58 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,5 +1,11 @@
@@ -296,7 +296,7 @@ index cbe3aa495519..b5cd1e278eb5 100644
prompt "Preemption Model"
default PREEMPT_NONE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 8ac1f0526476..c847d17e3b04 100644
+index 8ac1f0526476f..c847d17e3b04a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -656,6 +656,48 @@ void resched_curr(struct rq *rq)
@@ -445,7 +445,7 @@ index 8ac1f0526476..c847d17e3b04 100644
* The idle tasks have their own, simple scheduling class:
*/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 73a89fbd81be..f4928e5b6611 100644
+index 73a89fbd81be8..f4928e5b6611b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4570,7 +4570,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -521,7 +521,7 @@ index 73a89fbd81be..f4928e5b6611 100644
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 402fd37fb340..bc2466af142e 100644
+index 402fd37fb340a..bc2466af142eb 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -535,7 +535,7 @@ index 402fd37fb340..bc2466af142e 100644
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index d4bfc51358d3..ad854a670701 100644
+index d4bfc51358d37..ad854a670701b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1997,6 +1997,15 @@ extern void reweight_task(struct task_struct *p, int prio);
@@ -555,10 +555,10 @@ index d4bfc51358d3..ad854a670701 100644
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 02dffb1862b8..7caae85af03d 100644
+index 78a7c776fd9b7..a8285643a7cc0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2602,8 +2602,16 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+@@ -2630,8 +2630,16 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -577,7 +577,7 @@ index 02dffb1862b8..7caae85af03d 100644
}
struct ring_buffer_event *
-@@ -3861,15 +3869,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
+@@ -3889,15 +3897,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
static void print_lat_help_header(struct seq_file *m)
{
@@ -604,7 +604,7 @@ index 02dffb1862b8..7caae85af03d 100644
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3903,14 +3913,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
+@@ -3931,14 +3941,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
print_event_info(buf, m);
@@ -630,7 +630,7 @@ index 02dffb1862b8..7caae85af03d 100644
void
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 636fb7df3714..245b8289ecdf 100644
+index 636fb7df3714a..245b8289ecdf3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -185,6 +185,7 @@ static int trace_define_common_fields(void)
@@ -642,7 +642,7 @@ index 636fb7df3714..245b8289ecdf 100644
return ret;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index b3619b21217c..5a71964ade3a 100644
+index 8bd207443f4f4..18f091dceea3f 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -451,6 +451,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
@@ -687,5 +687,5 @@ index b3619b21217c..5a71964ade3a 100644
trace_seq_printf(s, "%x", entry->migrate_disable);
else
--
-2.43.0
+2.44.0
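
Note on the change above: this refresh only rebases the quilt patch; the blob hashes, hunk offsets, and Origin tarball move from 5.10.204-rt100 to 5.10.215-rt107, while the lazy-preemption logic itself is untouched. For orientation, the following is a minimal, self-contained userspace sketch of the idea the kernel patch implements: a second, weaker "lazy" resched request that defers preemption of fair-class tasks to a boundary where the fair scheduler would reschedule anyway, while real-time wakeups still force an immediate reschedule. The flag and helper names here (FLAG_NEED_RESCHED_LAZY, should_preempt_now(), and so on) are illustrative assumptions for this model, not the kernel's TIF_* flags or APIs.

/*
 * Conceptual model of lazy preemption (illustrative only, not kernel code).
 * A "lazy" resched request defers preemption of a normal (fair-class) task
 * until a natural rescheduling boundary, while real-time requests still
 * demand an immediate switch.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_NEED_RESCHED       (1u << 0)   /* immediate: preempt at the next opportunity */
#define FLAG_NEED_RESCHED_LAZY  (1u << 1)   /* deferred: preempt at the next boundary     */

struct task {
    const char  *name;
    bool         realtime;      /* RT tasks never get the lazy treatment */
    unsigned int flags;
};

/* An RT wakeup demands an immediate switch; a fair-class wakeup only asks lazily. */
static void request_resched(struct task *curr, bool waker_is_rt)
{
    if (waker_is_rt || curr->realtime)
        curr->flags |= FLAG_NEED_RESCHED;
    else
        curr->flags |= FLAG_NEED_RESCHED_LAZY;
}

/* Checked at an involuntary preemption point (think IRQ return to kernel):
 * the lazy bit alone is ignored here, so throughput-oriented tasks keep running. */
static bool should_preempt_now(const struct task *curr)
{
    return curr->flags & FLAG_NEED_RESCHED;
}

/* Checked at a rescheduling boundary (think return to user space):
 * both the immediate and the lazy request are honoured. */
static bool should_preempt_at_boundary(const struct task *curr)
{
    return curr->flags & (FLAG_NEED_RESCHED | FLAG_NEED_RESCHED_LAZY);
}

int main(void)
{
    struct task fair = { .name = "make", .realtime = false, .flags = 0 };

    request_resched(&fair, /* waker_is_rt = */ false);
    printf("IRQ return preempts %s? %s\n", fair.name,
           should_preempt_now(&fair) ? "yes" : "no");            /* no  */
    printf("user-space return preempts %s? %s\n", fair.name,
           should_preempt_at_boundary(&fair) ? "yes" : "no");    /* yes */

    request_resched(&fair, /* waker_is_rt = */ true);
    printf("IRQ return preempts %s now? %s\n", fair.name,
           should_preempt_now(&fair) ? "yes" : "no");            /* yes */
    return 0;
}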