Diffstat (limited to 'debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch')
-rw-r--r--  debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch  121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
new file mode 100644
index 0000000000..f7e006261e
--- /dev/null
+++ b/debian/patches-rt/0004-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
@@ -0,0 +1,121 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Jun 2023 09:39:33 +0200
+Subject: [PATCH 4/4] ARM: vfp: Move sending signals outside of vfp_lock()ed
+ section.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.6/older/patches-6.6.7-rt18.tar.xz
+
+VFP_bounce() is invoked from within vfp_support_entry() and may send a
+signal. Sending a signal uses spinlock_t which becomes a sleeping lock
+on PREEMPT_RT and must not be acquired within a preempt-disabled
+section.
+
+Move the vfp_raise_sigfpe() block outside of the vfp_lock() section.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/vfp/vfpmodule.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -268,7 +268,7 @@ static void vfp_panic(char *reason, u32
+ /*
+ * Process bitmask of exception conditions.
+ */
+-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
++static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
+ {
+ int si_code = 0;
+
+@@ -276,8 +276,7 @@ static void vfp_raise_exceptions(u32 exc
+
+ if (exceptions == VFP_EXCEPTION_ERROR) {
+ vfp_panic("unhandled bounce", inst);
+- vfp_raise_sigfpe(FPE_FLTINV, regs);
+- return;
++ return FPE_FLTINV;
+ }
+
+ /*
+@@ -305,8 +304,7 @@ static void vfp_raise_exceptions(u32 exc
+ RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
+ RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
+
+- if (si_code)
+- vfp_raise_sigfpe(si_code, regs);
++ return si_code;
+ }
+
+ /*
+@@ -352,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 i
+ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+ {
+ u32 fpscr, orig_fpscr, fpsid, exceptions;
++ int si_code2 = 0;
++ int si_code = 0;
+
+ pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
+
+@@ -397,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32
+ * unallocated VFP instruction but with FPSCR.IXE set and not
+ * on VFP subarch 1.
+ */
+- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+- return;
++ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
++ goto exit;
+ }
+
+ /*
+@@ -422,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32
+ */
+ exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
+ if (exceptions)
+- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
++ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+
+ /*
+ * If there isn't a second FP instruction, exit now. Note that
+ * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
+ */
+ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+- return;
++ goto exit;
+
+ /*
+ * The barrier() here prevents fpinst2 being read
+@@ -441,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32
+ emulate:
+ exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
+ if (exceptions)
+- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
++ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
++exit:
++ vfp_unlock();
++ if (si_code2)
++ vfp_raise_sigfpe(si_code2, regs);
++ if (si_code)
++ vfp_raise_sigfpe(si_code, regs);
+ }
+
+ static void vfp_enable(void *unused)
+@@ -773,6 +779,7 @@ static int vfp_support_entry(struct pt_r
+ * replay the instruction that trapped.
+ */
+ fmxr(FPEXC, fpexc);
++ vfp_unlock();
+ } else {
+ /* Check for synchronous or asynchronous exceptions */
+ if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
+@@ -794,10 +801,10 @@ static int vfp_support_entry(struct pt_r
+ }
+ }
+ bounce: regs->ARM_pc += 4;
++ /* VFP_bounce() will invoke vfp_unlock() */
+ VFP_bounce(trigger, fpexc, regs);
+ }
+
+- vfp_unlock();
+ return 0;
+ }
+
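
The hunks above boil down to a simple pattern: while vfp_lock() is held (preemption disabled), only record which signal is pending; raise it after vfp_unlock(), because vfp_raise_sigfpe() takes a spinlock_t, which is a sleeping lock on PREEMPT_RT. The following is a simplified sketch of that flow, not the verbatim kernel code; handle_bounce_locked() is a hypothetical stand-in for the exception handling that happens under the lock:

	/* Called with vfp_lock() held, i.e. with preemption disabled. */
	static void VFP_bounce_sketch(u32 trigger, u32 fpexc, struct pt_regs *regs)
	{
		int si_code;

		/* Only *record* a pending signal while the lock is held. */
		si_code = handle_bounce_locked(trigger, fpexc, regs);

		vfp_unlock();

		/*
		 * vfp_raise_sigfpe() acquires a spinlock_t, which sleeps on
		 * PREEMPT_RT, so it may only run once the preempt-disabled
		 * section has ended.
		 */
		if (si_code)
			vfp_raise_sigfpe(si_code, regs);
	}

In the real patch the same idea is spread across two call sites (si_code and si_code2), since VFP_bounce() can emulate two instructions per bounce and each emulation may request its own signal.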