Diffstat:
-rw-r--r--  debian/patches-rt/0262-powerpc-Add-support-for-lazy-preemption.patch | 267
1 file changed, 267 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0262-powerpc-Add-support-for-lazy-preemption.patch b/debian/patches-rt/0262-powerpc-Add-support-for-lazy-preemption.patch
new file mode 100644
index 000000000..259c9928b
--- /dev/null
+++ b/debian/patches-rt/0262-powerpc-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,267 @@
+From 8e5e88a7528b5f0b0f2b9835deaf8b6db693500f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 10:14:11 +0100
+Subject: [PATCH 262/323] powerpc: Add support for lazy preemption
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Implement the powerpc pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+---
+ arch/powerpc/Kconfig                   |  1 +
+ arch/powerpc/include/asm/thread_info.h | 16 ++++++++++++----
+ arch/powerpc/kernel/asm-offsets.c      |  1 +
+ arch/powerpc/kernel/entry_32.S         | 23 ++++++++++++++++-------
+ arch/powerpc/kernel/exceptions-64e.S   | 16 ++++++++++++----
+ arch/powerpc/kernel/syscall_64.c       | 10 +++++++---
+ 6 files changed, 49 insertions(+), 18 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index b3ab6c2d9f66..6d8ce54ad6dd 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -230,6 +230,7 @@ config PPC
+ 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
++	select HAVE_PREEMPT_LAZY
+ 	select MMU_GATHER_RCU_TABLE_FREE
+ 	select MMU_GATHER_PAGE_SIZE
+ 	select HAVE_REGS_AND_STACK_ACCESS_API
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index ff31d2fa2140..23bfe231fda3 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -54,6 +54,8 @@
+ struct thread_info {
+ 	int		preempt_count;		/* 0 => preemptable,
+ 						   <0 => BUG */
++	int		preempt_lazy_count;	/* 0 => preemptable,
++						   <0 => BUG */
+ 	unsigned long	local_flags;		/* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ 	unsigned long *livepatch_sp;
+@@ -104,11 +106,12 @@ void arch_setup_new_exec(void);
+ #define TIF_SINGLESTEP		8	/* singlestepping active */
+ #define TIF_NOHZ		9	/* in adaptive nohz mode */
+ #define TIF_SECCOMP		10	/* secure computing */
+-#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
+-#define TIF_NOERROR		12	/* Force successful syscall return */
++
++#define TIF_NEED_RESCHED_LAZY	11	/* lazy rescheduling necessary */
++#define TIF_SYSCALL_TRACEPOINT	12	/* syscall tracepoint instrumentation */
++
+ #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
+ #define TIF_UPROBE		14	/* breakpointed or single-stepping */
+-#define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
+ #define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
+ 						for stack store? */
+ #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+@@ -117,6 +120,9 @@ void arch_setup_new_exec(void);
+ #endif
+ #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+ #define TIF_32BIT		20	/* 32 bit binary */
++#define TIF_RESTOREALL		21	/* Restore all regs (implies NOERROR) */
++#define TIF_NOERROR		22	/* Force successful syscall return */
++
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+@@ -137,6 +143,7 @@ void arch_setup_new_exec(void);
+ #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ		(1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+@@ -145,8 +152,9 @@ void arch_setup_new_exec(void);
+ #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ 				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+-				 _TIF_NOTIFY_SIGNAL)
++				 _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_SIGNAL)
+ #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 5c125255571c..597379121407 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -189,6 +189,7 @@ int main(void)
+ 	OFFSET(TI_FLAGS, thread_info, flags);
+ 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
++	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+
+ #ifdef CONFIG_PPC64
+ 	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 459f5d00b990..fc9517a97640 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -414,7 +414,9 @@ ret_from_syscall:
+ 	mtmsr	r10
+ 	lwz	r9,TI_FLAGS(r2)
+ 	li	r8,-MAX_ERRNO
+-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
++	lis	r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
++	ori	r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
++	and.	r0,r9,r0
+ 	bne-	syscall_exit_work
+ 	cmplw	0,r3,r8
+ 	blt+	syscall_exit_cont
+@@ -530,13 +532,13 @@ syscall_dotrace:
+ 	b	syscall_dotrace_cont
+
+ syscall_exit_work:
+-	andi.	r0,r9,_TIF_RESTOREALL
++	andis.	r0,r9,_TIF_RESTOREALL@h
+ 	beq+	0f
+ 	REST_NVGPRS(r1)
+ 	b	2f
+ 0:	cmplw	0,r3,r8
+ 	blt+	1f
+-	andi.	r0,r9,_TIF_NOERROR
++	andis.	r0,r9,_TIF_NOERROR@h
+ 	bne-	1f
+ 	lwz	r11,_CCR(r1)			/* Load CR */
+ 	neg	r3,r3
+@@ -545,12 +547,12 @@ syscall_exit_work:
+
+ 1:	stw	r6,RESULT(r1)	/* Save result */
+ 	stw	r3,GPR3(r1)	/* Update return value */
+-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
++2:	andis.	r0,r9,(_TIF_PERSYSCALL_MASK)@h
+ 	beq	4f
+
+ 	/* Clear per-syscall TIF flags if any are set.  */
+
+-	li	r11,_TIF_PERSYSCALL_MASK
++	lis	r11,(_TIF_PERSYSCALL_MASK)@h
+ 	addi	r12,r2,TI_FLAGS
+ 3:	lwarx	r8,0,r12
+ 	andc	r8,r8,r11
+@@ -927,7 +929,14 @@ resume_kernel:
+ 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+ 	bne	restore_kuap
+ 	andi.	r8,r8,_TIF_NEED_RESCHED
++	bne+	1f
++	lwz	r0,TI_PREEMPT_LAZY(r2)
++	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
++	bne	restore_kuap
++	lwz	r0,TI_FLAGS(r2)
++	andi.	r0,r0,_TIF_NEED_RESCHED_LAZY
+ 	beq+	restore_kuap
++1:
+ 	lwz	r3,_MSR(r1)
+ 	andi.	r0,r3,MSR_EE	/* interrupts off? */
+ 	beq	restore_kuap	/* don't schedule if so */
+@@ -1248,7 +1257,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work:			/* r10 contains MSR_KERNEL here */
+-	andi.	r0,r9,_TIF_NEED_RESCHED
++	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
+ 	beq	do_user_signal
+
+ do_resched:			/* r10 contains MSR_KERNEL here */
+@@ -1267,7 +1276,7 @@ recheck:
+ 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ 	mtmsr	r10		/* disable interrupts */
+ 	lwz	r9,TI_FLAGS(r2)
+-	andi.	r0,r9,_TIF_NEED_RESCHED
++	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
+ 	bne-	do_resched
+ 	andi.	r0,r9,_TIF_USER_WORK_MASK
+ 	beq	restore_user
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index f579ce46eef2..715ff292a8f8 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1080,7 +1080,7 @@ _GLOBAL(ret_from_except_lite)
+ 	li	r10, -1
+ 	mtspr	SPRN_DBSR,r10
+ 	b	restore
+-1:	andi.	r0,r4,_TIF_NEED_RESCHED
++1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
+ 	beq	2f
+ 	bl	restore_interrupts
+ 	SCHEDULE_USER
+@@ -1132,12 +1132,20 @@ resume_kernel:
+ 	bne-	0b
+ 1:
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ 	/* Check if we need to preempt */
++	lwz	r8,TI_PREEMPT(r9)
++	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
++	bne	restore
+ 	andi.	r0,r4,_TIF_NEED_RESCHED
++	bne+	check_count
++
++	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
+ 	beq+	restore
++	lwz	r8,TI_PREEMPT_LAZY(r9)
++
+ 	/* Check that preempt_count() == 0 and interrupts are enabled */
+-	lwz	r8,TI_PREEMPT(r9)
++check_count:
+ 	cmpwi	cr0,r8,0
+ 	bne	restore
+ 	ld	r0,SOFTE(r1)
+@@ -1158,7 +1166,7 @@ resume_kernel:
+ 	 * interrupted after loading SRR0/1.
+ 	 */
+ 	wrteei	0
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ restore:
+diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
+index 310bcd768cd5..ae3212dcf562 100644
+--- a/arch/powerpc/kernel/syscall_64.c
++++ b/arch/powerpc/kernel/syscall_64.c
+@@ -193,7 +193,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ 	ti_flags = READ_ONCE(*ti_flagsp);
+ 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ 		local_irq_enable();
+-		if (ti_flags & _TIF_NEED_RESCHED) {
++		if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ 			schedule();
+ 		} else {
+ 			/*
+@@ -277,7 +277,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
+ 	ti_flags = READ_ONCE(*ti_flagsp);
+ 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ 		local_irq_enable(); /* returning to user: may enable */
+-		if (ti_flags & _TIF_NEED_RESCHED) {
++		if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ 			schedule();
+ 		} else {
+ 			if (ti_flags & _TIF_SIGPENDING)
+@@ -361,11 +361,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
+ 	/* Returning to a kernel context with local irqs enabled. */
+ 	WARN_ON_ONCE(!(regs->msr & MSR_EE));
+ again:
+-	if (IS_ENABLED(CONFIG_PREEMPT)) {
++	if (IS_ENABLED(CONFIG_PREEMPTION)) {
+ 		/* Return to preemptible kernel context */
+ 		if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+ 			if (preempt_count() == 0)
+ 				preempt_schedule_irq();
++		} else if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED_LAZY)) {
++			if ((preempt_count() == 0) &&
++			    (current_thread_info()->preempt_lazy_count == 0))
++				preempt_schedule_irq();
+ 		}
+ 	}
+
+-- 
+2.43.0
+
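
A note for readers of this patch: moving TIF_RESTOREALL and TIF_NOERROR up to
bits 21/22 pushes them out of the low 16 bits that andi. can test, which is
why the 32-bit entry code switches those checks to andis. with @h (or a
lis/ori pair for masks spanning both halfwords). The scheduling policy itself
is easier to follow in C. The sketch below is a minimal stand-alone model of
the check the patch adds to interrupt_exit_kernel_prepare(); the struct, the
bit numbers, and the want_preempt() helper are illustrative assumptions, not
kernel API.

/*
 * Minimal model of the lazy-preemption exit check (illustrative only).
 * Build: cc -o lazy lazy.c
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED	1	/* bit numbers are hypothetical */
#define TIF_NEED_RESCHED_LAZY	11
#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1UL << TIF_NEED_RESCHED_LAZY)
#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

struct ti {
	unsigned long flags;
	int preempt_count;	/* >0 blocks all kernel preemption */
	int preempt_lazy_count;	/* >0 additionally blocks lazy preemption */
};

/*
 * Mirrors the decision interrupt_exit_kernel_prepare() gains in the patch:
 * a hard NEED_RESCHED preempts whenever preempt_count is zero; a lazy
 * request also requires preempt_lazy_count to be zero.
 */
static bool want_preempt(const struct ti *ti)
{
	if (ti->flags & _TIF_NEED_RESCHED)
		return ti->preempt_count == 0;
	if (ti->flags & _TIF_NEED_RESCHED_LAZY)
		return ti->preempt_count == 0 && ti->preempt_lazy_count == 0;
	return false;
}

int main(void)
{
	struct ti lazy = { _TIF_NEED_RESCHED_LAZY, 0, 1 };
	struct ti hard = { _TIF_NEED_RESCHED, 0, 1 };

	/* A lazy request is deferred while a lazy-preempt section is held... */
	printf("lazy request, lazy_count=1: preempt=%d\n", want_preempt(&lazy));
	/* ...but a hard NEED_RESCHED still preempts immediately. */
	printf("hard request, lazy_count=1: preempt=%d\n", want_preempt(&hard));
	return 0;
}

This asymmetry is also why _TIF_NEED_RESCHED_MASK exists: the user-return
paths above treat both flags the same (schedule() runs either way), while only
the kernel-preemption path distinguishes hard from lazy requests.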