Diffstat:
 debian/patches-rt/0243-powerpc-Add-support-for-lazy-preemption.patch | 196 ++++++++++++++++++
 1 file changed, 196 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0243-powerpc-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0243-powerpc-Add-support-for-lazy-preemption.patch
new file mode 100644
index 000000000..7c07610a1
--- /dev/null
+++ b/debian/patches-rt/0243-powerpc-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,196 @@
+From 0fe19697678375c08da0c1d53231706a9df830d4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 10:14:11 +0100
+Subject: [PATCH 243/347] powerpc: Add support for lazy preemption
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Implement the powerpc pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
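+
+Note (a sketch for illustration, not part of the upstream patch): the new
+TIF_NEED_RESCHED_LAZY flag is gated by its own preempt_lazy_count, while
+TIF_NEED_RESCHED keeps its current behaviour. The checks added to the
+interrupt-exit paths in entry_32.S and entry_64.S below amount to roughly
+the following C logic; the helper name should_resched_now() is invented
+for this note, and the real code additionally requires that interrupts
+were enabled (MSR_EE) before it will schedule:
+
+	static bool should_resched_now(struct thread_info *ti)
+	{
+		if (ti->preempt_count)
+			return false;	/* all preemption disabled */
+		if (ti->flags & _TIF_NEED_RESCHED)
+			return true;	/* immediate reschedule requested */
+		if (ti->preempt_lazy_count)
+			return false;	/* lazy preemption disabled */
+		/* lazy reschedule request, and nothing blocks it */
+		return !!(ti->flags & _TIF_NEED_RESCHED_LAZY);
+	}
+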
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/include/asm/thread_info.h | 9 +++++++--
+ arch/powerpc/kernel/asm-offsets.c | 1 +
+ arch/powerpc/kernel/entry_32.S | 17 ++++++++++++-----
+ arch/powerpc/kernel/entry_64.S | 16 ++++++++++++----
+ 5 files changed, 33 insertions(+), 11 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index f4517f4be192..4536d047cdff 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -217,6 +217,7 @@ config PPC
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 3c0002044bc9..ce316076bc52 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -37,6 +37,8 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ unsigned long local_flags; /* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -81,7 +83,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+ #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */
+-#define TIF_32BIT 4 /* 32 bit binary */
++#define TIF_NEED_RESCHED_LAZY 4 /* lazy rescheduling necessary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_PATCH_PENDING 6 /* pending live patching update */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+@@ -100,6 +102,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
+ #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
++#define TIF_32BIT 20 /* 32 bit binary */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -119,6 +122,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_FSCHECK (1<<TIF_FSCHECK)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+@@ -127,8 +131,9 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+- _TIF_FSCHECK)
++ _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 50400f213bbf..1bb82c5dc1d9 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -156,6 +156,7 @@ int main(void)
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ OFFSET(TI_PREEMPT, thread_info, preempt_count);
++ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_CPU, thread_info, cpu);
+
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 26b3f853cbf6..3783f3ef17a4 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -888,7 +888,14 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+@@ -899,11 +906,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1232,7 +1239,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1253,7 +1260,7 @@ do_resched: /* r10 contains MSR_KERNEL here */
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 58b50967b3e5..ef02e41d5d96 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -176,7 +176,7 @@ system_call: /* label this so stack traces look sane */
+ * based on caller's run-mode / personality.
+ */
+ ld r11,SYS_CALL_TABLE@toc(2)
+- andi. r10,r10,_TIF_32BIT
++ andis. r10,r10,_TIF_32BIT@h
+ beq 15f
+ addi r11,r11,8 /* use 32-bit syscall entries */
+ clrldi r3,r3,32
+@@ -774,7 +774,7 @@ _GLOBAL(ret_from_except_lite)
+ bl restore_math
+ b restore
+ #endif
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -836,10 +836,18 @@ _GLOBAL(ret_from_except_lite)
+
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
++check_count:
+ cmpwi cr0,r8,0
+ bne restore
+ ld r0,SOFTE(r1)
+@@ -856,7 +864,7 @@ _GLOBAL(ret_from_except_lite)
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 1b
+
+ /*
+--
+2.36.1
+
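+Background on the andis. change in entry_64.S (explanatory, ignored by git
+am as it follows the version trailer): TIF_NEED_RESCHED_LAZY takes over
+bit 4, so TIF_32BIT moves up to bit 20. The powerpc andi. instruction ANDs
+against a 16-bit immediate and can no longer reach that flag; andis.
+applies its immediate to the upper halfword instead, with _TIF_32BIT@h
+selecting the upper 16 bits of the constant (1 << 20 = 0x00100000, i.e. an
+immediate of 0x0010).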