Diffstat (limited to 'debian/patches-rt/0238-x86-Support-for-lazy-preemption.patch')
-rw-r--r--  debian/patches-rt/0238-x86-Support-for-lazy-preemption.patch  236
1 file changed, 236 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0238-x86-Support-for-lazy-preemption.patch b/debian/patches-rt/0238-x86-Support-for-lazy-preemption.patch
new file mode 100644
index 000000000..563d97c84
--- /dev/null
+++ b/debian/patches-rt/0238-x86-Support-for-lazy-preemption.patch
@@ -0,0 +1,236 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 11:03:47 +0100
+Subject: [PATCH 238/342] x86: Support for lazy preemption
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ebf6918cf9f47fbdd3414aacbc7091e776654392
+
+Implement the x86 pieces for lazy preempt.
+
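+Lazy preemption splits the resched request in two: TIF_NEED_RESCHED
+keeps its usual semantics, while the new TIF_NEED_RESCHED_LAZY only
+triggers a preemption once both the regular preempt count and the new
+per-thread preempt_lazy_count have dropped to zero. Conceptually, the
+preempt-enable path becomes something like the following (a simplified
+sketch of the intent, with preempt_count standing in for the per-cpu
+__preempt_count, not the literal implementation):
+
+	if (--preempt_count == 0 &&
+	    (need_resched() ||
+	     (!current_thread_info()->preempt_lazy_count &&
+	      test_thread_flag(TIF_NEED_RESCHED_LAZY))))
+		preempt_schedule();
+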
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/entry/common.c | 4 ++--
+ arch/x86/entry/entry_32.S | 17 ++++++++++++++++
+ arch/x86/entry/entry_64.S | 16 +++++++++++++++
+ arch/x86/include/asm/preempt.h | 31 +++++++++++++++++++++++++++++-
+ arch/x86/include/asm/thread_info.h | 11 +++++++++++
+ arch/x86/kernel/asm-offsets.c | 2 ++
+ 7 files changed, 79 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 6ee7220e7f47..7299aeaa7cbb 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -180,6 +180,7 @@ config X86
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if PARAVIRT
+ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 91676b0d2d4c..3b5e41d9b29d 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+
+ #define EXIT_TO_USERMODE_LOOP_FLAGS \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
+
+ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ {
+@@ -149,7 +149,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ /* We have work to do. */
+ local_irq_enable();
+
+- if (cached_flags & _TIF_NEED_RESCHED)
++ if (cached_flags & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
+ #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 37d9016d4768..324b7539eaab 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -750,8 +750,25 @@ END(ret_from_exception)
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ .Lneed_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all_kernel
++#else
++ jz test_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all_kernel
++
++ movl PER_CPU_VAR(current_task), %ebp
++ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all_kernel
++
++ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++ jz restore_all_kernel
++ test_int_off:
++#endif
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all_kernel
+ call preempt_schedule_irq
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index f53d995b1370..94fccaa04bfa 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -733,7 +733,23 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz 1f
++#else
++ jz do_preempt_schedule_irq
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz 1f
++
++ movq PER_CPU_VAR(current_task), %rcx
++ cmpl $0, TASK_TI_preempt_lazy_count(%rcx) # non-zero preempt_lazy_count ?
++ jnz 1f
++
++ btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
++ jnc 1f
++do_preempt_schedule_irq:
++#endif
+ call preempt_schedule_irq
+ jmp 0b
+ 1:
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 7f2dbd91fc74..22992c837795 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -86,17 +86,46 @@ static __always_inline void __preempt_count_sub(int val)
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index b5e4c357523e..d311dc042ddd 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -56,17 +56,24 @@ struct task_struct;
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ u32 status; /* thread synchronous flags */
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+ { \
+ .flags = 0, \
++ .preempt_lazy_count = 0, \
+ }
+
+ #else /* !__ASSEMBLY__ */
+
+ #include <asm/asm-offsets.h>
+
++#define GET_THREAD_INFO(reg) \
++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++ _ASM_SUB $(THREAD_SIZE),reg ;
++
+ #endif
+
+ /*
+@@ -91,6 +98,7 @@ struct thread_info {
+ #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
+ #define TIF_NOTSC 16 /* TSC is not accessible in userland */
+ #define TIF_IA32 17 /* IA32 compatibility process */
++#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */
+ #define TIF_NOHZ 19 /* in adaptive nohz mode */
+ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
+ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
+@@ -120,6 +128,7 @@ struct thread_info {
+ #define _TIF_NOCPUID (1 << TIF_NOCPUID)
+ #define _TIF_NOTSC (1 << TIF_NOTSC)
+ #define _TIF_IA32 (1 << TIF_IA32)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ (1 << TIF_NOHZ)
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+ #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
+@@ -165,6 +174,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN (THREAD_SIZE/8)
+
+ /*
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 01de31db300d..ce1c5b9fbd8c 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -38,6 +38,7 @@ void common(void) {
+
+ BLANK();
+ OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
++ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
+
+ BLANK();
+@@ -94,6 +95,7 @@ void common(void) {
+
+ BLANK();
+ DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
+
+ /* TLB state for the entry code */
+ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);