Diffstat
-rw-r--r--	debian/patches-rt/0128-sched-vtime-Consolidate-IRQ-time-accounting.patch | 303
1 file changed, 303 insertions, 0 deletions
diff --git a/debian/patches-rt/0128-sched-vtime-Consolidate-IRQ-time-accounting.patch b/debian/patches-rt/0128-sched-vtime-Consolidate-IRQ-time-accounting.patch
new file mode 100644
index 000000000..bb1e03d33
--- /dev/null
+++ b/debian/patches-rt/0128-sched-vtime-Consolidate-IRQ-time-accounting.patch
@@ -0,0 +1,303 @@
+From 749e0a8d4b10240d5834e135f34c47d107aa1442 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Wed, 2 Dec 2020 12:57:30 +0100
+Subject: [PATCH 128/323] sched/vtime: Consolidate IRQ time accounting
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The 3 architectures implementing CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+all have their own version of irq time accounting that dispatch the
+cputime to the appropriate index: hardirq, softirq, system, idle,
+guest... from an all-in-one function.
+
+Instead of having these ad-hoc versions, move the cputime destination
+dispatch decision to the core code and leave only the actual per-index
+cputime accounting to the architecture.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20201202115732.27827-4-frederic@kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/ia64/kernel/time.c    | 20 ++++++++++----
+ arch/powerpc/kernel/time.c | 56 +++++++++++++++++++++++++++-----------
+ arch/s390/kernel/vtime.c   | 45 +++++++++++++++++++++---------
+ include/linux/vtime.h      | 16 ++++-------
+ kernel/sched/cputime.c     | 13 ++++++---
+ 5 files changed, 102 insertions(+), 48 deletions(-)
+
+diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
+index 7abc5f37bfaf..733e0e3324b8 100644
+--- a/arch/ia64/kernel/time.c
++++ b/arch/ia64/kernel/time.c
+@@ -138,12 +138,8 @@ void vtime_account_kernel(struct task_struct *tsk)
+ 	struct thread_info *ti = task_thread_info(tsk);
+ 	__u64 stime = vtime_delta(tsk);
+ 
+-	if ((tsk->flags & PF_VCPU) && !irq_count())
++	if (tsk->flags & PF_VCPU)
+ 		ti->gtime += stime;
+-	else if (hardirq_count())
+-		ti->hardirq_time += stime;
+-	else if (in_serving_softirq())
+-		ti->softirq_time += stime;
+ 	else
+ 		ti->stime += stime;
+ }
+@@ -156,6 +152,20 @@ void vtime_account_idle(struct task_struct *tsk)
+ 	ti->idle_time += vtime_delta(tsk);
+ }
+ 
++void vtime_account_softirq(struct task_struct *tsk)
++{
++	struct thread_info *ti = task_thread_info(tsk);
++
++	ti->softirq_time += vtime_delta(tsk);
++}
++
++void vtime_account_hardirq(struct task_struct *tsk)
++{
++	struct thread_info *ti = task_thread_info(tsk);
++
++	ti->hardirq_time += vtime_delta(tsk);
++}
++
+ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+ 
+ static irqreturn_t
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index ba9b54d35f57..588e081ca55b 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -312,12 +312,11 @@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
+ 	return stime_scaled;
+ }
+ 
+-static unsigned long vtime_delta(struct task_struct *tsk,
++static unsigned long vtime_delta(struct cpu_accounting_data *acct,
+ 				 unsigned long *stime_scaled,
+ 				 unsigned long *steal_time)
+ {
+ 	unsigned long now, stime;
+-	struct cpu_accounting_data *acct = get_accounting(tsk);
+ 
+ 	WARN_ON_ONCE(!irqs_disabled());
+ 
+@@ -332,29 +331,30 @@ static unsigned long vtime_delta(struct task_struct *tsk,
+ 	return stime;
+ }
+ 
++static void vtime_delta_kernel(struct cpu_accounting_data *acct,
++			       unsigned long *stime, unsigned long *stime_scaled)
++{
++	unsigned long steal_time;
++
++	*stime = vtime_delta(acct, stime_scaled, &steal_time);
++	*stime -= min(*stime, steal_time);
++	acct->steal_time += steal_time;
++}
++
+ void vtime_account_kernel(struct task_struct *tsk)
+ {
+-	unsigned long stime, stime_scaled, steal_time;
+ 	struct cpu_accounting_data *acct = get_accounting(tsk);
++	unsigned long stime, stime_scaled;
+ 
+-	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+-
+-	stime -= min(stime, steal_time);
+-	acct->steal_time += steal_time;
++	vtime_delta_kernel(acct, &stime, &stime_scaled);
+ 
+-	if ((tsk->flags & PF_VCPU) && !irq_count()) {
++	if (tsk->flags & PF_VCPU) {
+ 		acct->gtime += stime;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ 		acct->utime_scaled += stime_scaled;
+ #endif
+ 	} else {
+-		if (hardirq_count())
+-			acct->hardirq_time += stime;
+-		else if (in_serving_softirq())
+-			acct->softirq_time += stime;
+-		else
+-			acct->stime += stime;
+-
++		acct->stime += stime;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ 		acct->stime_scaled += stime_scaled;
+ #endif
+@@ -367,10 +367,34 @@ void vtime_account_idle(struct task_struct *tsk)
+ 	unsigned long stime, stime_scaled, steal_time;
+ 	struct cpu_accounting_data *acct = get_accounting(tsk);
+ 
+-	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
++	stime = vtime_delta(acct, &stime_scaled, &steal_time);
+ 	acct->idle_time += stime + steal_time;
+ }
+ 
++static void vtime_account_irq_field(struct cpu_accounting_data *acct,
++				    unsigned long *field)
++{
++	unsigned long stime, stime_scaled;
++
++	vtime_delta_kernel(acct, &stime, &stime_scaled);
++	*field += stime;
++#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
++	acct->stime_scaled += stime_scaled;
++#endif
++}
++
++void vtime_account_softirq(struct task_struct *tsk)
++{
++	struct cpu_accounting_data *acct = get_accounting(tsk);
++	vtime_account_irq_field(acct, &acct->softirq_time);
++}
++
++void vtime_account_hardirq(struct task_struct *tsk)
++{
++	struct cpu_accounting_data *acct = get_accounting(tsk);
++	vtime_account_irq_field(acct, &acct->hardirq_time);
++}
++
+ static void vtime_flush_scaled(struct task_struct *tsk,
+ 			       struct cpu_accounting_data *acct)
+ {
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 18a97631af43..9b3c5978b668 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -223,31 +223,50 @@ void vtime_flush(struct task_struct *tsk)
+ 	S390_lowcore.avg_steal_timer = avg_steal;
+ }
+ 
++static u64 vtime_delta(void)
++{
++	u64 timer = S390_lowcore.last_update_timer;
++
++	S390_lowcore.last_update_timer = get_vtimer();
++
++	return timer - S390_lowcore.last_update_timer;
++}
++
+ /*
+  * Update process times based on virtual cpu times stored by entry.S
+  * to the lowcore fields user_timer, system_timer & steal_clock.
+  */
+ void vtime_account_kernel(struct task_struct *tsk)
+ {
+-	u64 timer;
+-
+-	timer = S390_lowcore.last_update_timer;
+-	S390_lowcore.last_update_timer = get_vtimer();
+-	timer -= S390_lowcore.last_update_timer;
++	u64 delta = vtime_delta();
+ 
+-	if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
+-		S390_lowcore.guest_timer += timer;
+-	else if (hardirq_count())
+-		S390_lowcore.hardirq_timer += timer;
+-	else if (in_serving_softirq())
+-		S390_lowcore.softirq_timer += timer;
++	if (tsk->flags & PF_VCPU)
++		S390_lowcore.guest_timer += delta;
+ 	else
+-		S390_lowcore.system_timer += timer;
++		S390_lowcore.system_timer += delta;
+ 
+-	virt_timer_forward(timer);
++	virt_timer_forward(delta);
+ }
+ EXPORT_SYMBOL_GPL(vtime_account_kernel);
+ 
++void vtime_account_softirq(struct task_struct *tsk)
++{
++	u64 delta = vtime_delta();
++
++	S390_lowcore.softirq_timer += delta;
++
++	virt_timer_forward(delta);
++}
++
++void vtime_account_hardirq(struct task_struct *tsk)
++{
++	u64 delta = vtime_delta();
++
++	S390_lowcore.hardirq_timer += delta;
++
++	virt_timer_forward(delta);
++}
++
+ /*
+  * Sorted add to a list. List is linear searched until first bigger
+  * element is found.
+diff --git a/include/linux/vtime.h b/include/linux/vtime.h
+index 2cdeca062db3..6c9867419615 100644
+--- a/include/linux/vtime.h
++++ b/include/linux/vtime.h
+@@ -83,16 +83,12 @@ static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
+ #endif
+ 
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+-extern void vtime_account_irq_enter(struct task_struct *tsk);
+-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+-{
+-	/* On hard|softirq exit we always account to hard|softirq cputime */
+-	vtime_account_kernel(tsk);
+-}
++extern void vtime_account_irq(struct task_struct *tsk);
++extern void vtime_account_softirq(struct task_struct *tsk);
++extern void vtime_account_hardirq(struct task_struct *tsk);
+ extern void vtime_flush(struct task_struct *tsk);
+ #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+-static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
++static inline void vtime_account_irq(struct task_struct *tsk) { }
+ static inline void vtime_flush(struct task_struct *tsk) { }
+ #endif
+ 
+@@ -105,13 +101,13 @@ static inline void irqtime_account_irq(struct task_struct *tsk) { }
+ 
+ static inline void account_irq_enter_time(struct task_struct *tsk)
+ {
+-	vtime_account_irq_enter(tsk);
++	vtime_account_irq(tsk);
+ 	irqtime_account_irq(tsk);
+ }
+ 
+ static inline void account_irq_exit_time(struct task_struct *tsk)
+ {
+-	vtime_account_irq_exit(tsk);
++	vtime_account_irq(tsk);
+ 	irqtime_account_irq(tsk);
+ }
+ 
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 1d78b835ec8b..60129af16ed6 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -417,13 +417,18 @@ void vtime_task_switch(struct task_struct *prev)
+ }
+ # endif
+ 
+-void vtime_account_irq_enter(struct task_struct *tsk)
++void vtime_account_irq(struct task_struct *tsk)
+ {
+-	if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
+-	    !in_interrupt() && is_idle_task(tsk))
++	if (hardirq_count()) {
++		vtime_account_hardirq(tsk);
++	} else if (in_serving_softirq()) {
++		vtime_account_softirq(tsk);
++	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
++		   is_idle_task(tsk)) {
+ 		vtime_account_idle(tsk);
+-	else
++	} else {
+ 		vtime_account_kernel(tsk);
++	}
+ }
+ 
+ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+-- 
+2.43.0
+
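
For readers skimming the patch, the core of the change is the new vtime_account_irq() dispatcher in kernel/sched/cputime.c: the context checks (hardirq_count(), in_serving_softirq(), is_idle_task()) now happen once in core code, and each architecture only implements per-index hooks such as vtime_account_hardirq() and vtime_account_softirq(). The standalone C sketch below models that structure only; it is not kernel code. The struct context flags, the simplified thread_info layout, and the omission of the CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE gate are illustrative stand-ins for the real preempt-count state.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the ia64-style per-task accounting fields. */
struct thread_info {
	uint64_t stime, gtime, idle_time, hardirq_time, softirq_time;
};

/* Hypothetical stand-in for preempt_count()/task state queries. */
struct context {
	bool in_hardirq;   /* models hardirq_count() != 0 */
	bool in_softirq;   /* models in_serving_softirq() */
	bool is_idle;      /* models is_idle_task(tsk)    */
	bool is_vcpu;      /* models tsk->flags & PF_VCPU */
};

/* Per-index "architecture" hooks: each accounts exactly one bucket. */
static void vtime_account_hardirq(struct thread_info *ti, uint64_t delta)
{
	ti->hardirq_time += delta;
}

static void vtime_account_softirq(struct thread_info *ti, uint64_t delta)
{
	ti->softirq_time += delta;
}

static void vtime_account_idle(struct thread_info *ti, uint64_t delta)
{
	ti->idle_time += delta;
}

static void vtime_account_kernel(struct thread_info *ti,
				 const struct context *ctx, uint64_t delta)
{
	/* After the patch, this hook no longer tests irq context itself. */
	if (ctx->is_vcpu)
		ti->gtime += delta;
	else
		ti->stime += delta;
}

/*
 * Core-code dispatcher, mirroring the shape of the new
 * vtime_account_irq(): one place decides the destination index
 * (the real code also gates the idle branch on
 * CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE, omitted here).
 */
static void vtime_account_irq(struct thread_info *ti,
			      const struct context *ctx, uint64_t delta)
{
	if (ctx->in_hardirq)
		vtime_account_hardirq(ti, delta);
	else if (ctx->in_softirq)
		vtime_account_softirq(ti, delta);
	else if (ctx->is_idle)
		vtime_account_idle(ti, delta);
	else
		vtime_account_kernel(ti, ctx, delta);
}

int main(void)
{
	struct thread_info ti = {0};

	vtime_account_irq(&ti, &(struct context){ .in_hardirq = true }, 100);
	vtime_account_irq(&ti, &(struct context){ .in_softirq = true }, 50);
	vtime_account_irq(&ti, &(struct context){ .is_idle = true }, 25);
	vtime_account_irq(&ti, &(struct context){ .is_vcpu = true }, 10);

	printf("hardirq=%llu softirq=%llu idle=%llu guest=%llu\n",
	       (unsigned long long)ti.hardirq_time,
	       (unsigned long long)ti.softirq_time,
	       (unsigned long long)ti.idle_time,
	       (unsigned long long)ti.gtime);
	return 0;
}

Running the sketch prints hardirq=100 softirq=50 idle=25 guest=10: each delta lands in exactly one bucket. That is the invariant the patch preserves while replacing the three per-architecture all-in-one dispatchers (ia64, powerpc, s390) with a single decision point in core code, which is also why the arch-side vtime_account_kernel() implementations lose their irq_count()/hardirq_count()/in_serving_softirq() tests.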