Diffstat
-rw-r--r--  debian/patches-rt/0034-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch | 179
1 file changed, 179 insertions, 0 deletions
diff --git a/debian/patches-rt/0034-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch b/debian/patches-rt/0034-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000..667836a1e
--- /dev/null
+++ b/debian/patches-rt/0034-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,179 @@
+From 3af9ca89d4398239a71471cffb488e3104990e23 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:23 +0100
+Subject: [PATCH 034/323] csky/mm/highmem: Switch to generic kmap atomic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+No reason having the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-csky@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/csky/Kconfig | 1 +
+ arch/csky/include/asm/fixmap.h | 4 +-
+ arch/csky/include/asm/highmem.h | 6 ++-
+ arch/csky/mm/highmem.c | 75 +--------------------------------
+ 4 files changed, 8 insertions(+), 78 deletions(-)
+
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index 7bf0a617e94c..c9f2533cc53d 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -286,6 +286,7 @@ config NR_CPUS
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on !CPU_CK610
++ select KMAP_LOCAL
+ default y
+
+ config FORCE_MAX_ZONEORDER
+diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
+index 81f9477d5330..4b589cc20900 100644
+--- a/arch/csky/include/asm/fixmap.h
++++ b/arch/csky/include/asm/fixmap.h
+@@ -8,7 +8,7 @@
+ #include <asm/memory.h>
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ enum fixed_addresses {
+@@ -17,7 +17,7 @@ enum fixed_addresses {
+ #endif
+ #ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #endif
+ __end_of_fixed_addresses
+ };
+diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
+index 14645e3d5cd5..1f4ed3f4c0d9 100644
+--- a/arch/csky/include/asm/highmem.h
++++ b/arch/csky/include/asm/highmem.h
+@@ -9,7 +9,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/uaccess.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #include <asm/cache.h>
+
+ /* undef for production */
+@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
+
+ #define ARCH_HAS_KMAP_FLUSH_TLB
+ extern void kmap_flush_tlb(unsigned long addr);
+-extern void *kmap_atomic_pfn(unsigned long pfn);
+
+ #define flush_cache_kmaps() do {} while (0)
+
++#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
++#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
++
+ extern void kmap_init(void);
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
+index 89c10800a002..4161df3c6c15 100644
+--- a/arch/csky/mm/highmem.c
++++ b/arch/csky/mm/highmem.c
+@@ -9,8 +9,6 @@
+ #include <asm/tlbflush.h>
+ #include <asm/cacheflush.h>
+
+-static pte_t *kmap_pte;
+-
+ unsigned long highstart_pfn, highend_pfn;
+
+ void kmap_flush_tlb(unsigned long addr)
+@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
+ }
+ EXPORT_SYMBOL(kmap_flush_tlb);
+
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte - idx)));
+-#endif
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
+- flush_tlb_one((unsigned long)vaddr);
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int idx;
+-
+- if (vaddr < FIXADDR_START)
+- return;
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
+-
+- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-
+- pte_clear(&init_mm, vaddr, kmap_pte - idx);
+- flush_tlb_one(vaddr);
+-#else
+- (void) idx; /* to kill a warning */
+-#endif
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+-
+-/*
+- * This is the same as kmap_atomic() but can map memory that doesn't
+- * have a struct page associated with it.
+- */
+-void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- pagefault_disable();
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+- flush_tlb_one(vaddr);
+-
+- return (void *) vaddr;
+-}
+-
+-static void __init kmap_pages_init(void)
++void __init kmap_init(void)
+ {
+ unsigned long vaddr;
+ pgd_t *pgd;
+@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+ }
+-
+-void __init kmap_init(void)
+-{
+- unsigned long vaddr;
+-
+- kmap_pages_init();
+-
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
+-
+- kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+-}
+--
+2.43.0
+