author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit     b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree       1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch
parent     Adding upstream version 5.10.209. (diff)
Adding debian version 5.10.209-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch')
-rw-r--r--  debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch  346
1 file changed, 346 insertions, 0 deletions
diff --git a/debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch b/debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch
new file mode 100644
index 000000000..828708904
--- /dev/null
+++ b/debian/patches-rt/0029-highmem-Provide-generic-variant-of-kmap_atomic.patch
@@ -0,0 +1,346 @@
+From 4e1b14787f7a2c71a9347db23c402f5dbe2da206 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:18 +0100
+Subject: [PATCH 029/323] highmem: Provide generic variant of kmap_atomic*
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+The kmap_atomic* interfaces in all architectures are pretty much the same
+except for post map operations (flush) and pre- and post unmap operations.
+
+Provide a generic variant for that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem.h |  82 ++++++++++++++++++-----
+ mm/Kconfig              |   3 +
+ mm/highmem.c            | 144 +++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 211 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 3297bfca78ed..14d5b4020c8c 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -31,9 +31,16 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+ 
+ #include <asm/kmap_types.h>
+ 
++/*
++ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
++ */
++#ifdef CONFIG_KMAP_LOCAL
++void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
++void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
++void kunmap_local_indexed(void *vaddr);
++#endif
++
+ #ifdef CONFIG_HIGHMEM
+-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+-extern void kunmap_atomic_high(void *kvaddr);
+ #include <asm/highmem.h>
+ 
+ #ifndef ARCH_HAS_KMAP_FLUSH_TLB
+@@ -81,6 +88,11 @@ static inline void kunmap(struct page *page)
+  * be used in IRQ contexts, so in some (very limited) cases we need
+  * it.
+  */
++
++#ifndef CONFIG_KMAP_LOCAL
++void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
++void kunmap_atomic_high(void *kvaddr);
++
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+ 	preempt_disable();
+@@ -89,7 +101,38 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ 		return page_address(page);
+ 	return kmap_atomic_high_prot(page, prot);
+ }
+-#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
++
++static inline void __kunmap_atomic(void *vaddr)
++{
++	kunmap_atomic_high(vaddr);
++}
++#else /* !CONFIG_KMAP_LOCAL */
++
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++	preempt_disable();
++	pagefault_disable();
++	return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++	preempt_disable();
++	pagefault_disable();
++	return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_atomic(void *addr)
++{
++	kunmap_local_indexed(addr);
++}
++
++#endif /* CONFIG_KMAP_LOCAL */
++
++static inline void *kmap_atomic(struct page *page)
++{
++	return kmap_atomic_prot(page, kmap_prot);
++}
+ 
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
+@@ -147,25 +190,33 @@ static inline void *kmap_atomic(struct page *page)
+ 	pagefault_disable();
+ 	return page_address(page);
+ }
+-#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
+ 
+-static inline void kunmap_atomic_high(void *addr)
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++	return kmap_atomic(page);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++	return kmap_atomic(pfn_to_page(pfn));
++}
++
++static inline void __kunmap_atomic(void *addr)
+ {
+ 	/*
+ 	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+-	 * handles re-enabling faults + preemption
++	 * handles re-enabling faults and preemption
+ 	 */
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ 	kunmap_flush_on_unmap(addr);
+ #endif
+ }
+ 
+-#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
+-
+ #define kmap_flush_unused()	do {} while(0)
+ 
+ #endif /* CONFIG_HIGHMEM */
+ 
++#if !defined(CONFIG_KMAP_LOCAL)
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ 
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
+@@ -196,22 +247,21 @@ static inline void kmap_atomic_idx_pop(void)
+ 	__this_cpu_dec(__kmap_atomic_idx);
+ #endif
+ }
+-
++#endif
+ #endif
+ 
+ /*
+  * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+  * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+  */
+-#define kunmap_atomic(addr)					\
+-do {								\
+-	BUILD_BUG_ON(__same_type((addr), struct page *));	\
+-	kunmap_atomic_high(addr);				\
+-	pagefault_enable();					\
+-	preempt_enable();					\
++#define kunmap_atomic(__addr)					\
++do {								\
++	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
++	__kunmap_atomic(__addr);				\
++	pagefault_enable();					\
++	preempt_enable();					\
+ } while (0)
+ 
+-
+ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+ #ifndef clear_user_highpage
+ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 390165ffbb0f..8c49d09da214 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -859,4 +859,7 @@ config ARCH_HAS_HUGEPD
+ config MAPPING_DIRTY_HELPERS
+ 	bool
+ 
++config KMAP_LOCAL
++	bool
++
+ endmenu
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 6abfd762eee7..bb4ce13ee7e7 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -31,9 +31,11 @@
+ #include <asm/tlbflush.h>
+ #include <linux/vmalloc.h>
+ 
++#ifndef CONFIG_KMAP_LOCAL
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ #endif
++#endif
+ 
+ /*
+  * Virtual_count is not a pure "count".
+@@ -365,9 +367,147 @@ void kunmap_high(struct page *page)
+ 	if (need_wakeup)
+ 		wake_up(pkmap_map_wait);
+ }
+-
+ EXPORT_SYMBOL(kunmap_high);
+-#endif /* CONFIG_HIGHMEM */
++#endif /* CONFIG_HIGHMEM */
++
++#ifdef CONFIG_KMAP_LOCAL
++
++#include <asm/kmap_size.h>
++
++static DEFINE_PER_CPU(int, __kmap_local_idx);
++
++static inline int kmap_local_idx_push(void)
++{
++	int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;
++
++	WARN_ON_ONCE(in_irq() && !irqs_disabled());
++	BUG_ON(idx >= KM_MAX_IDX);
++	return idx;
++}
++
++static inline int kmap_local_idx(void)
++{
++	return __this_cpu_read(__kmap_local_idx) - 1;
++}
++
++static inline void kmap_local_idx_pop(void)
++{
++	int idx = __this_cpu_dec_return(__kmap_local_idx);
++
++	BUG_ON(idx < 0);
++}
++
++#ifndef arch_kmap_local_post_map
++# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
++#endif
++#ifndef arch_kmap_local_pre_unmap
++# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
++#endif
++
++#ifndef arch_kmap_local_post_unmap
++# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
++#endif
++
++#ifndef arch_kmap_local_map_idx
++#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
++#endif
++
++#ifndef arch_kmap_local_unmap_idx
++#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
++#endif
++
++#ifndef arch_kmap_local_high_get
++static inline void *arch_kmap_local_high_get(struct page *page)
++{
++	return NULL;
++}
++#endif
++
++/* Unmap a local mapping which was obtained by kmap_high_get() */
++static inline void kmap_high_unmap_local(unsigned long vaddr)
++{
++#ifdef ARCH_NEEDS_KMAP_HIGH_GET
++	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
++		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
++#endif
++}
++
++static inline int kmap_local_calc_idx(int idx)
++{
++	return idx + KM_MAX_IDX * smp_processor_id();
++}
++
++static pte_t *__kmap_pte;
++
++static pte_t *kmap_get_pte(void)
++{
++	if (!__kmap_pte)
++		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
++	return __kmap_pte;
++}
++
++void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
++{
++	pte_t pteval, *kmap_pte = kmap_get_pte();
++	unsigned long vaddr;
++	int idx;
++
++	preempt_disable();
++	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++	BUG_ON(!pte_none(*(kmap_pte - idx)));
++	pteval = pfn_pte(pfn, prot);
++	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
++	arch_kmap_local_post_map(vaddr, pteval);
++	preempt_enable();
++
++	return (void *)vaddr;
++}
++EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
++
++void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++	void *kmap;
++
++	if (!PageHighMem(page))
++		return page_address(page);
++
++	/* Try kmap_high_get() if architecture has it enabled */
++	kmap = arch_kmap_local_high_get(page);
++	if (kmap)
++		return kmap;
++
++	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
++}
++EXPORT_SYMBOL(__kmap_local_page_prot);
++
++void kunmap_local_indexed(void *vaddr)
++{
++	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
++	pte_t *kmap_pte = kmap_get_pte();
++	int idx;
++
++	if (addr < __fix_to_virt(FIX_KMAP_END) ||
++	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
++		WARN_ON_ONCE(addr < PAGE_OFFSET);
++
++		/* Handle mappings which were obtained by kmap_high_get() */
++		kmap_high_unmap_local(addr);
++		return;
++	}
++
++	preempt_disable();
++	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
++	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
++
++	arch_kmap_local_pre_unmap(addr);
++	pte_clear(&init_mm, addr, kmap_pte - idx);
++	arch_kmap_local_post_unmap(addr);
++	kmap_local_idx_pop();
++	preempt_enable();
++}
++EXPORT_SYMBOL(kunmap_local_indexed);
++#endif
+ 
+ #if defined(HASHED_PAGE_VIRTUAL)
+ 
+-- 
+2.43.0
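
For orientation, the call pattern this generic variant keeps stable looks as
follows. This is a minimal sketch, not part of the patch:
copy_highpage_sketch() is a hypothetical helper, but the pairing of
kmap_atomic()/kunmap_atomic() and the strict LIFO unmap order are exactly what
the per-CPU index stack above supports.

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: copy one (possibly highmem) page into another. */
static void copy_highpage_sketch(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst);	/* fixmap slot, or page_address() for lowmem */
	void *s = kmap_atomic(src);	/* nested mapping; pagefaults/preemption stay off */

	memcpy(d, s, PAGE_SIZE);	/* must not sleep inside the mapped window */
	kunmap_atomic(s);		/* unmap in reverse order: last mapped first */
	kunmap_atomic(d);		/* re-enables pagefaults and preemption */
}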
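
An architecture opts into the generic implementation by selecting KMAP_LOCAL
in its Kconfig and, only where its MMU or cache model requires it, defining
the arch_kmap_local_* hooks so that mm/highmem.c skips its do-nothing
defaults. The sketch below is illustrative and not taken from the patch:
"foo" is a made-up architecture and local_flush_tlb_one() stands in for
whatever single-entry flush primitive the architecture actually provides.

/*
 * arch/foo/include/asm/highmem.h (hypothetical) -- the arch Kconfig
 * entry would additionally carry "select KMAP_LOCAL".
 */
#define arch_kmap_local_post_map(vaddr, pteval)	\
	local_flush_tlb_one(vaddr)	/* make the fresh PTE visible */

#define arch_kmap_local_pre_unmap(vaddr)	\
	local_flush_tlb_one(vaddr)	/* invalidate before the PTE is cleared */

With the default index math, nesting depth d on CPU n lands in fixmap slot
FIX_KMAP_BEGIN + d + KM_MAX_IDX * n. Because fixmap virtual addresses grow
downwards while slot numbers grow upwards, the matching PTE is addressed as
kmap_pte - idx, and the unmap path can recompute the slot from the per-CPU
depth instead of having to store it per mapping.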