Diffstat (limited to 'debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch')
-rw-r--r--  debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch  66
1 file changed, 66 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
new file mode 100644
index 0000000000..ed4998a37a
--- /dev/null
+++ b/debian/patches-rt/0004-perf-Shrink-the-size-of-the-recursion-counter.patch
@@ -0,0 +1,66 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 4 Jul 2024 19:03:38 +0200
+Subject: [PATCH 4/7] perf: Shrink the size of the recursion counter.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+There are four recursion counters, one for each context. The type of the
+counter is `int' but the counter is used as `bool' since it is only
+incremented if zero.
+The main goal here is to shrink the whole struct to a 32bit int which can
+later be added to task_struct in an existing hole.
+
+Reduce the type of the recursion counter to an unsigned char and keep the
+increment/decrement operation.
+
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/20240621091601.18227-1-frederic@kernel.org
+Link: https://lore.kernel.org/r/20240704170424.1466941-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/events/callchain.c | 2 +-
+ kernel/events/core.c | 2 +-
+ kernel/events/internal.h | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -29,7 +29,7 @@ static inline size_t perf_callchain_entr
+ 				 sysctl_perf_event_max_contexts_per_stack));
+ }
+
+-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
++static DEFINE_PER_CPU(u8, callchain_recursion[PERF_NR_CONTEXTS]);
+ static atomic_t nr_callchain_events;
+ static DEFINE_MUTEX(callchain_mutex);
+ static struct callchain_cpus_entries *callchain_cpus_entries;
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9776,7 +9776,7 @@ struct swevent_htable {
+ 	int				hlist_refcount;
+
+ 	/* Recursion avoidance in each contexts */
+-	int				recursion[PERF_NR_CONTEXTS];
++	u8				recursion[PERF_NR_CONTEXTS];
+ };
+
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -208,7 +208,7 @@ arch_perf_out_copy_user(void *dst, const
+
+ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+
+-static inline int get_recursion_context(int *recursion)
++static inline int get_recursion_context(u8 *recursion)
+ {
+ 	unsigned char rctx = interrupt_context_level();
+
+@@ -221,7 +221,7 @@ static inline int get_recursion_context(
+ 	return rctx;
+ }
+
+-static inline void put_recursion_context(int *recursion, int rctx)
++static inline void put_recursion_context(u8 *recursion, int rctx)
+ {
+ 	barrier();
+ 	recursion[rctx]--;
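
For context on the change above: each of the four per-context recursion counters only ever holds 0 or 1, so shrinking them from int to u8 makes the whole array 4 bytes, small enough to later be placed into a 32bit hole in task_struct. Below is a minimal userspace sketch of that get/put pattern; NR_CONTEXTS, ctx_recursion and current_context_level() are illustrative stand-ins, not the kernel's actual code (which uses PERF_NR_CONTEXTS, per-CPU arrays, interrupt_context_level() and barrier()).

/*
 * Minimal userspace sketch of the recursion-counter pattern: one counter
 * per context, used as a boolean, so an unsigned char per context suffices
 * and all four counters together fit in 32 bits.
 */
#include <stdio.h>

typedef unsigned char u8;

#define NR_CONTEXTS 4			/* e.g. task, softirq, hardirq, NMI */

static u8 ctx_recursion[NR_CONTEXTS];	/* 4 bytes instead of 16 with int */

/* Stand-in for interrupt_context_level(); always "task context" here. */
static int current_context_level(void)
{
	return 0;
}

/* Returns the context index, or -1 if this context is already active. */
static int get_recursion_context(u8 *recursion)
{
	int rctx = current_context_level();

	if (recursion[rctx])
		return -1;		/* counter acts as a bool: refuse to nest */

	recursion[rctx]++;
	return rctx;
}

static void put_recursion_context(u8 *recursion, int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int rctx = get_recursion_context(ctx_recursion);

	if (rctx < 0)
		return 1;

	printf("entered context %d, nested attempt returns %d\n",
	       rctx, get_recursion_context(ctx_recursion));
	put_recursion_context(ctx_recursion, rctx);
	printf("counter array is %zu bytes\n", sizeof(ctx_recursion));
	return 0;
}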