Diffstat
-rw-r--r--   debian/patches-rt/0218-arm-Enable-highmem-for-rt.patch   184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0218-arm-Enable-highmem-for-rt.patch b/debian/patches-rt/0218-arm-Enable-highmem-for-rt.patch
new file mode 100644
index 000000000..6d37c0670
--- /dev/null
+++ b/debian/patches-rt/0218-arm-Enable-highmem-for-rt.patch
@@ -0,0 +1,184 @@
+From d8647f71379a704607e15bb3a7fd5d84b3f10a6d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Feb 2013 11:03:11 +0100
+Subject: [PATCH 218/347] arm: Enable highmem for rt
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+fixup highmem for ARM.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/include/asm/switch_to.h |  8 +++++
+ arch/arm/mm/highmem.c            | 56 +++++++++++++++++++++++++++-----
+ include/linux/highmem.h          |  1 +
+ 3 files changed, 57 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
+index d3e937dcee4d..6ab96a2ce1f8 100644
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
+@@ -4,6 +4,13 @@
+ 
+ #include <linux/thread_info.h>
+ 
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+  * For v7 SMP cores running a preemptible kernel we may be pre-empted
+  * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
+ #define switch_to(prev,next,last)					\
+ do {									\
+ 	__complete_pending_tlbi();					\
++	switch_kmaps(prev, next);					\
+ 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+ } while (0)
+ 
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index eb4b225d28c9..542692dbd40a 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
+ 	return *ptep;
+ }
+ 
++static unsigned int fixmap_idx(int type)
++{
++	return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++}
++
+ void *kmap(struct page *page)
+ {
+ 	might_sleep();
+@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+ 
+ void *kmap_atomic(struct page *page)
+ {
++	pte_t pte = mk_pte(page, kmap_prot);
+ 	unsigned int idx;
+ 	unsigned long vaddr;
+ 	void *kmap;
+ 	int type;
+ 
+-	preempt_disable();
++	preempt_disable_nort();
+ 	pagefault_disable();
+ 	if (!PageHighMem(page))
+ 		return page_address(page);
+@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+ 
+ 	type = kmap_atomic_idx_push();
+ 
+-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++	idx = fixmap_idx(type);
+ 	vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 	/*
+@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+ 	 * in place, so the contained TLB flush ensures the TLB is updated
+ 	 * with the new mapping.
+ 	 */
+-	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_fixmap_pte(idx, pte);
+ 
+ 	return (void *)vaddr;
+ }
+@@ -106,10 +115,13 @@ void __kunmap_atomic(void *kvaddr)
+ 
+ 	if (kvaddr >= (void *)FIXADDR_START) {
+ 		type = kmap_atomic_idx();
+-		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++		idx = fixmap_idx(type);
+ 
+ 		if (cache_is_vivt())
+ 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++		current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 		BUG_ON(vaddr != __fix_to_virt(idx));
+ #else
+@@ -122,28 +134,56 @@ void __kunmap_atomic(void *kvaddr)
+ 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ 	}
+ 	pagefault_enable();
+-	preempt_enable();
++	preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+ 
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++	pte_t pte = pfn_pte(pfn, kmap_prot);
+ 	unsigned long vaddr;
+ 	int idx, type;
+ 	struct page *page = pfn_to_page(pfn);
+ 
+-	preempt_disable();
++	preempt_disable_nort();
+ 	pagefault_disable();
+ 	if (!PageHighMem(page))
+ 		return page_address(page);
+ 
+ 	type = kmap_atomic_idx_push();
+-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++	idx = fixmap_idx(type);
+ 	vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+-	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	current->kmap_pte[type] = pte;
++#endif
++	set_fixmap_pte(idx, pte);
+ 
+ 	return (void *)vaddr;
+ }
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	int i;
++
++	/*
++	 * Clear @prev's kmap_atomic mappings
++	 */
++	for (i = 0; i < prev_p->kmap_idx; i++) {
++		int idx = fixmap_idx(i);
++
++		set_fixmap_pte(idx, __pte(0));
++	}
++	/*
++	 * Restore @next_p's kmap_atomic mappings
++	 */
++	for (i = 0; i < next_p->kmap_idx; i++) {
++		int idx = fixmap_idx(i);
++
++		if (!pte_none(next_p->kmap_pte[i]))
++			set_fixmap_pte(idx, next_p->kmap_pte[i]);
++	}
++}
++#endif
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 1ac89e4718bf..eaa2ef9bc10e 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -8,6 +8,7 @@
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
++#include <linux/sched.h>
+ 
+ #include <asm/cacheflush.h>
+ 
+-- 
+2.36.1
+
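
For context, the patch matters to any caller that expects a kmap_atomic() mapping to stay valid across its critical section. Below is a minimal sketch of such a caller, assuming a 4.19-era kernel where kmap_atomic()/kunmap_atomic() are the highmem API; the helper name copy_to_highmem_page() is hypothetical and not part of the patch. On PREEMPT_RT_FULL the preempt_disable_nort() in the patched kmap_atomic() is a no-op, so the task may be switched out while the mapping is live; the patch saves the PTE in current->kmap_pte[] and switch_kmaps() re-installs it when the task is scheduled back in.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative only (hypothetical helper): copy a buffer into a
 * possibly-highmem page.  On mainline, kmap_atomic() disables
 * preemption, so the per-CPU fixmap slot cannot be lost.  With this
 * patch applied, the task may be preempted between kmap_atomic() and
 * kunmap_atomic() on RT; switch_kmaps() restores the saved PTE from
 * current->kmap_pte[] when the task runs again.
 */
static void copy_to_highmem_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* map the page into a fixmap slot */

	memcpy(vaddr, src, len);		/* may be preempted here on RT */
	kunmap_atomic(vaddr);			/* drop the mapping, pop the slot */
}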