-rw-r--r-- debian/patches-rt/0048-mm-highmem-Provide-kmap_local.patch | 207
1 file changed, 207 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0048-mm-highmem-Provide-kmap_local.patch b/debian/patches-rt/0048-mm-highmem-Provide-kmap_local.patch
new file mode 100644
index 000000000..aa15ab4ec
--- /dev/null
+++ b/debian/patches-rt/0048-mm-highmem-Provide-kmap_local.patch
@@ -0,0 +1,207 @@
+From 452c4c8536ea017ed0f82287834e7cfa2f751488 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:37 +0100
+Subject: [PATCH 048/323] mm/highmem: Provide kmap_local*
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Now that the kmap atomic index is stored in task struct, provide a
+preemptible variant. On context switch the maps of an outgoing task are
+removed and the maps of the incoming task are restored. That's obviously
+slow, but highmem is slow anyway.
+
+The kmap_local.*() functions can be invoked from both preemptible and
+atomic context. kmap local sections disable migration to keep the resulting
+virtual mapping address correct, but disable neither pagefaults nor
+preemption.
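+
+For illustration, a minimal sketch of nesting two local mappings (the
+helper name is made up; kmap_local_page()/kunmap_local() are the
+interfaces added by this patch):
+
+	static void copy_page_local(struct page *dst, struct page *src)
+	{
+		void *vdst, *vsrc;
+
+		vsrc = kmap_local_page(src);
+		vdst = kmap_local_page(dst);
+		memcpy(vdst, vsrc, PAGE_SIZE);
+		/* Unmap in reverse (stack) order of the maps */
+		kunmap_local(vdst);
+		kunmap_local(vsrc);
+	}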
+
+A wholesale conversion of kmap_atomic to be fully preemptible is not
+possible because some of the usage sites might rely on the preemption
+disable for serialization or on the implicit pagefault disable. The
+conversion needs to be done on a case-by-case basis.
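+
+As a rough sketch (update_buffer() and the surrounding variables are
+made up), a converted site which relied on the implicit pagefault
+disable has to make that explicit; all others can simply drop the side
+effects:
+
+	/* Before */
+	addr = kmap_atomic(page);
+	ret = update_buffer(addr);
+	kunmap_atomic(addr);
+
+	/* After */
+	addr = kmap_local_page(page);
+	pagefault_disable();	/* only if the site relied on it */
+	ret = update_buffer(addr);
+	pagefault_enable();
+	kunmap_local(addr);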
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 48 ++++++++++++++++++++++++++++++++
+ include/linux/highmem.h | 43 +++++++++++++++++-----------
+ mm/highmem.c | 6 ++++
+ 3 files changed, 81 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
+index c5a22177db85..1bbe96dc8be6 100644
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -68,6 +68,26 @@ static inline void kmap_flush_unused(void)
+ __kmap_flush_unused();
+ }
+
++static inline void *kmap_local_page(struct page *page)
++{
++ return __kmap_local_page_prot(page, kmap_prot);
++}
++
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_local(void *vaddr)
++{
++ kunmap_local_indexed(vaddr);
++}
++
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+ preempt_disable();
+@@ -140,6 +160,28 @@ static inline void kunmap(struct page *page)
+ #endif
+ }
+
++static inline void *kmap_local_page(struct page *page)
++{
++ return page_address(page);
++}
++
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ return kmap_local_page(page);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ return kmap_local_page(pfn_to_page(pfn));
++}
++
++static inline void __kunmap_local(void *addr)
++{
++#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
++ kunmap_flush_on_unmap(addr);
++#endif
++}
++
+ static inline void *kmap_atomic(struct page *page)
+ {
+ preempt_disable();
+@@ -181,4 +223,10 @@ do { \
+ __kunmap_atomic(__addr); \
+ } while (0)
+
++#define kunmap_local(__addr) \
++do { \
++ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
++ __kunmap_local(__addr); \
++} while (0)
++
+ #endif
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 5c888525b4c5..7a3c6d4b79d8 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(void *addr);
+ static inline void kmap_flush_unused(void);
+
+ /**
+- * kmap_atomic - Atomically map a page for temporary usage
++ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+- * Side effect: On return pagefaults and preemption are disabled.
+- *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+- * addr1 = kmap_atomic(page1);
+- * addr2 = kmap_atomic(page2);
++ * addr1 = kmap_local_page(page1);
++ * addr2 = kmap_local_page(page2);
+ * ...
+- * kunmap_atomic(addr2);
+- * kunmap_atomic(addr1);
++ * kunmap_local(addr2);
++ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(void);
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+- * While it is significantly faster than kmap() it comes with restrictions
+- * about the pointer validity and the side effects of disabling page faults
+- * and preemption. Use it only when absolutely necessary, e.g. from non
+- * preemptible contexts.
++ * While it is significantly faster than kmap() for the highmem case, it
++ * comes with restrictions about the pointer validity. Only use when really
++ * necessary.
++ *
++ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
++ * disabling migration in order to keep the virtual address stable across
++ * preemption. No caller of kmap_local_page() can rely on this side effect.
++ */
++static inline void *kmap_local_page(struct page *page);
++
++/**
++ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
++ * @page: Pointer to the page to be mapped
++ *
++ * Returns: The virtual address of the mapping
++ *
++ * Effectively a wrapper around kmap_local_page() which disables pagefaults
++ * and preemption.
++ *
++ * Do not use in new code. Use kmap_local_page() instead.
+ */
+ static inline void *kmap_atomic(struct page *page);
+
+@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct page *page);
+ *
+ * Counterpart to kmap_atomic().
+ *
+- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
++ * Effectively a wrapper around kunmap_local() which additionally undoes
++ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+- *
+- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
+- * in the low memory area. For real highmen pages the mapping which was
+- * established with kmap_atomic() is destroyed.
+ */
+
+ /* Highmem related interfaces for management code */
+diff --git a/mm/highmem.c b/mm/highmem.c
+index d7a1c80001d0..8db577e5290c 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+ unsigned long vaddr;
+ int idx;
+
++ /*
++ * Disable migration so resulting virtual address is stable
++ * across preemption.
++ */
++ migrate_disable();
+ preempt_disable();
+ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
+ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
+ kmap_local_idx_pop();
+ preempt_enable();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(kunmap_local_indexed);
+
+--
+2.43.0
+