Diffstat:
 -rw-r--r--  debian/patches-rt/0018-sched-Add-migrate_disable-tracepoints.patch | 110
 1 file changed, 110 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0018-sched-Add-migrate_disable-tracepoints.patch b/debian/patches-rt/0018-sched-Add-migrate_disable-tracepoints.patch
new file mode 100644
index 000000000..0f315ce9d
--- /dev/null
+++ b/debian/patches-rt/0018-sched-Add-migrate_disable-tracepoints.patch
@@ -0,0 +1,110 @@
+From 8b4906578ce683b9bce7df984b4179519152345f Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 23 Oct 2020 12:12:15 +0200
+Subject: [PATCH 018/323] sched: Add migrate_disable() tracepoints
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+XXX write a tracer:
+
+ - 'migirate_disable() -> migrate_enable()' time in task_sched_runtime()
+ - 'migrate_pull -> sched-in' time in task_sched_runtime()
+
+The first will give worst case for the second, which is the actual
+interference experienced by the task to due migration constraints of
+migrate_disable().
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/trace/events/sched.h | 12 ++++++++++++
+ kernel/sched/core.c          |  4 ++++
+ kernel/sched/deadline.c      |  1 +
+ kernel/sched/rt.c            |  8 +++++++-
+ 4 files changed, 24 insertions(+), 1 deletion(-)
+
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index c96a4337afe6..e48f584abf5f 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -650,6 +650,18 @@ DECLARE_TRACE(sched_update_nr_running_tp,
+ 	TP_PROTO(struct rq *rq, int change),
+ 	TP_ARGS(rq, change));
+ 
++DECLARE_TRACE(sched_migrate_disable_tp,
++	      TP_PROTO(struct task_struct *p),
++	      TP_ARGS(p));
++
++DECLARE_TRACE(sched_migrate_enable_tp,
++	      TP_PROTO(struct task_struct *p),
++	      TP_ARGS(p));
++
++DECLARE_TRACE(sched_migrate_pull_tp,
++	      TP_PROTO(struct task_struct *p),
++	      TP_ARGS(p));
++
+ #endif /* _TRACE_SCHED_H */
+ 
+ /* This part must be outside protection */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 452fc1dfb143..b5f35b512577 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1742,6 +1742,8 @@ void migrate_disable(void)
+ 		return;
+ 	}
+ 
++	trace_sched_migrate_disable_tp(p);
++
+ 	preempt_disable();
+ 	this_rq()->nr_pinned++;
+ 	p->migration_disabled = 1;
+@@ -1774,6 +1776,8 @@ void migrate_enable(void)
+ 	p->migration_disabled = 0;
+ 	this_rq()->nr_pinned--;
+ 	preempt_enable();
++
++	trace_sched_migrate_enable_tp(p);
+ }
+ EXPORT_SYMBOL_GPL(migrate_enable);
+ 
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 7cf3248894a9..fcf546cd2eac 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2282,6 +2282,7 @@ static void pull_dl_task(struct rq *this_rq)
+ 			goto skip;
+ 
+ 		if (is_migration_disabled(p)) {
++			trace_sched_migrate_pull_tp(p);
+ 			push_task = get_push_task(src_rq);
+ 		} else {
+ 			deactivate_task(src_rq, p, 0);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index c25e35f41555..c5038d680c2c 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1891,7 +1891,12 @@ static int push_rt_task(struct rq *rq, bool pull)
+ 		struct task_struct *push_task = NULL;
+ 		int cpu;
+ 
+-		if (!pull || rq->push_busy)
++		if (!pull)
++			return 0;
++
++		trace_sched_migrate_pull_tp(next_task);
++
++		if (rq->push_busy)
+ 			return 0;
+ 
+ 		cpu = find_lowest_rq(rq->curr);
+@@ -2237,6 +2242,7 @@ static void pull_rt_task(struct rq *this_rq)
+ 			goto skip;
+ 
+ 		if (is_migration_disabled(p)) {
++			trace_sched_migrate_pull_tp(p);
+ 			push_task = get_push_task(src_rq);
+ 		} else {
+ 			deactivate_task(src_rq, p, 0);
+-- 
+2.43.0
+
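
Note (not part of the patch): the three tracepoints added above are bare DECLARE_TRACE() tracepoints, so they do not appear as events in tracefs; a probe has to be registered from kernel code, which is what the "XXX write a tracer" note in the changelog refers to. The following is a minimal, illustrative sketch of such a consumer, not the tracer the patch asks for. It assumes the tracepoint symbols are reachable from the probe code (built in-tree, or exported with EXPORT_TRACEPOINT_SYMBOL_GPL, which this patch does not add); the module and function names are made up for the example.

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe signatures match TP_PROTO(struct task_struct *p) plus the leading data cookie. */
static void probe_migrate_disable(void *data, struct task_struct *p)
{
	/* A real tracer would record a per-task timestamp here; trace_printk() keeps the sketch short. */
	trace_printk("migrate_disable(): comm=%s pid=%d\n", p->comm, p->pid);
}

static void probe_migrate_enable(void *data, struct task_struct *p)
{
	/* Pairing this with the timestamp from the disable probe would give the disabled-section length. */
	trace_printk("migrate_enable(): comm=%s pid=%d\n", p->comm, p->pid);
}

static int __init migrate_probe_init(void)
{
	int ret;

	/* register_trace_<name>() is generated by DECLARE_TRACE() for each tracepoint. */
	ret = register_trace_sched_migrate_disable_tp(probe_migrate_disable, NULL);
	if (ret)
		return ret;

	ret = register_trace_sched_migrate_enable_tp(probe_migrate_enable, NULL);
	if (ret)
		unregister_trace_sched_migrate_disable_tp(probe_migrate_disable, NULL);

	return ret;
}

static void __exit migrate_probe_exit(void)
{
	unregister_trace_sched_migrate_disable_tp(probe_migrate_disable, NULL);
	unregister_trace_sched_migrate_enable_tp(probe_migrate_enable, NULL);
	/* Wait for in-flight probe calls to finish before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(migrate_probe_init);
module_exit(migrate_probe_exit);
MODULE_LICENSE("GPL");

With a probe like this loaded, the output lands in the ftrace ring buffer (/sys/kernel/debug/tracing/trace), which is enough to eyeball migrate_disable()/migrate_enable() pairs per task.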