Diffstat:
 debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch | 127
 1 file changed, 127 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch b/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch
new file mode 100644
index 000000000..fd18d53e2
--- /dev/null
+++ b/debian/patches-rt/0002-drm-amd-display-Simplify-the-per-CPU-usage.patch
@@ -0,0 +1,127 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2023 16:15:13 +0200
+Subject: [PATCH 2/5] drm/amd/display: Simplify the per-CPU usage.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+The fpu_recursion_depth counter is used to ensure that dc_fpu_begin()
+can be invoked multiple times while the underlying kernel-FPU begin
+function itself is only invoked once. Also the counterpart (dc_fpu_end())
+is balanced properly.
+
+Instead of using the get_cpu_ptr() dance around the increment, it is
+simpler to increment the per-CPU variable directly. The per-CPU variable
+also has to be incremented and decremented on the same CPU, which is
+ensured by the inner part that disables preemption. This works but is
+not obvious, and the preempt counter is touched several times needlessly.
+
+Disable preemption before incrementing fpu_recursion_depth for the first
+time. Keep preemption disabled until dc_fpu_end(), where the counter is
+decremented, making it obvious that preemption has to stay disabled
+while the counter is non-zero.
+Use the simple __this_cpu_inc_return()/__this_cpu_dec_return() helpers.
+Remove the nested preempt_disable()/preempt_enable() calls which are no
+longer needed.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 50 ++++++++++---------------
+ 1 file changed, 20 insertions(+), 30 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion
+ */
+ inline void dc_assert_fp_enabled(void)
+ {
+- int *pcpu, depth = 0;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- depth = *pcpu;
+- put_cpu_ptr(&fpu_recursion_depth);
++ depth = __this_cpu_read(fpu_recursion_depth);
+
+ ASSERT(depth >= 1);
+ }
+@@ -84,32 +82,27 @@ inline void dc_assert_fp_enabled(void)
+ */
+ void dc_fpu_begin(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu += 1;
++ preempt_disable();
++ depth = __this_cpu_inc_return(fpu_recursion_depth);
+
+- if (*pcpu == 1) {
++ if (depth == 1) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_begin();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+- preempt_disable();
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ enable_kernel_vsx();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+- preempt_disable();
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ enable_kernel_altivec();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+- preempt_disable();
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ enable_kernel_fp();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_begin();
+ #endif
+ }
+
+- TRACE_DCN_FPU(true, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(true, function_name, line, depth);
+ }
+
+ /**
+@@ -124,29 +117,26 @@ void dc_fpu_begin(const char *function_n
+ */
+ void dc_fpu_end(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu -= 1;
+- if (*pcpu <= 0) {
++ depth = __this_cpu_dec_return(fpu_recursion_depth);
++ if (depth == 0) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ disable_kernel_vsx();
+- preempt_enable();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ disable_kernel_altivec();
+- preempt_enable();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ disable_kernel_fp();
+- preempt_enable();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_end();
+ #endif
++ } else {
++ WARN_ON_ONCE(depth < 0);
+ }
+
+- TRACE_DCN_FPU(false, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(false, function_name, line, depth);
++ preempt_enable();
+ }
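
For readers skimming the Debian patch queue, here is a minimal sketch in kernel C of the pattern the commit message describes: a per-CPU recursion counter with preemption held from the outermost dc_fpu_begin() until the matching dc_fpu_end(). This is not the actual dc_fpu.c code; it assumes an x86 build, drops the PPC64/ARM64 branches and the TRACE_DCN_FPU() calls, and the *_sketch names are hypothetical.

/*
 * Minimal sketch of the pattern introduced by the patch above.
 * Arch-specific handling other than x86 and the DCN tracing are omitted.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/bug.h>
#include <asm/fpu/api.h>

static DEFINE_PER_CPU(int, fpu_recursion_depth);

static void dc_fpu_begin_sketch(void)
{
	int depth;

	/*
	 * Preemption is disabled before the first increment and stays
	 * disabled until the matching end, so the per-CPU counter is
	 * incremented and decremented on the same CPU.
	 */
	preempt_disable();
	depth = __this_cpu_inc_return(fpu_recursion_depth);

	/* Only the outermost begin actually enables kernel FPU usage. */
	if (depth == 1)
		kernel_fpu_begin();
}

static void dc_fpu_end_sketch(void)
{
	int depth;

	depth = __this_cpu_dec_return(fpu_recursion_depth);
	if (depth == 0)
		kernel_fpu_end();
	else
		WARN_ON_ONCE(depth < 0);

	/* Balances the preempt_disable() in the matching begin. */
	preempt_enable();
}

Nested callers then balance naturally: an inner begin/end pair only moves the counter between 1 and 2, so kernel_fpu_begin()/kernel_fpu_end() run exactly once for the outermost pair, and the preempt count nests because every begin disables and every end re-enables preemption.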