author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit    b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree      1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0032-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
parent    Adding upstream version 5.10.209. (diff)
Adding debian version 5.10.209-2. (debian/5.10.209-2, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  debian/patches-rt/0032-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch | 212
1 file changed, 212 insertions, 0 deletions
diff --git a/debian/patches-rt/0032-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/debian/patches-rt/0032-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
new file mode 100644
index 000000000..581f1da69
--- /dev/null
+++ b/debian/patches-rt/0032-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
@@ -0,0 +1,212 @@
+From 7eeaddc0ba0792dd0d8a1f5f167ff44230b77855 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:21 +0100
+Subject: [PATCH 032/323] arc/mm/highmem: Use generic kmap atomic
+ implementation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Adopt the map ordering to match the other architectures and the generic
+code. Also limit the maximum number of entries and make it independent of
+the number of CPUs. The original implementation did the following calculation:
+
+ nr_slots = mapsize >> PAGE_SHIFT;
+
+This results in either 512 or 1024 total slots depending on the
+configuration. The total slots have to be divided by the number of CPUs to
+get the number of slots per CPU (former KM_TYPE_NR). ARC supports up to 4k
+CPUs, so this just falls apart in random ways depending on the number of
+CPUs and the actual kmap (atomic) nesting. The comment in highmem.c:
+
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ * slots across NR_CPUS would be more than sufficient (generic code defines
+ * KM_TYPE_NR as 20).
+
+is just wrong. KM_TYPE_NR (now KM_MAX_IDX) is the number of slots per CPU
+because kmap_local/atomic() needs to support nested mappings (thread,
+softirq, interrupt). While KM_MAX_IDX might be overestimated, the above
+reasoning is clearly mistaken, and the highmem code was obviously never
+tested on any system with more than a few CPUs.
+
+Use the default number of slots and fail the build when it does not
+fit. Randomly failing at runtime is not really a good option.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: linux-snps-arc@lists.infradead.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arc/Kconfig | 1 +
+ arch/arc/include/asm/highmem.h | 26 +++++++++++----
+ arch/arc/include/asm/kmap_types.h | 14 --------
+ arch/arc/mm/highmem.c | 54 +++----------------------------
+ 4 files changed, 26 insertions(+), 69 deletions(-)
+ delete mode 100644 arch/arc/include/asm/kmap_types.h
+
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 0a89cc9def65..d8804001d550 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -507,6 +507,7 @@ config LINUX_RAM_BASE
+ config HIGHMEM
+ bool "High Memory Support"
+ select ARCH_DISCONTIGMEM_ENABLE
++ select KMAP_LOCAL
+ help
+ With ARC 2G:2G address split, only upper 2G is directly addressable by
+ kernel. Enable this to potentially allow access to rest of 2G and PAE
+diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
+index 6e5eafb3afdd..a6b8e2c352c4 100644
+--- a/arch/arc/include/asm/highmem.h
++++ b/arch/arc/include/asm/highmem.h
+@@ -9,17 +9,29 @@
+ #ifdef CONFIG_HIGHMEM
+
+ #include <uapi/asm/page.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
++
++#define FIXMAP_SIZE PGDIR_SIZE
++#define PKMAP_SIZE PGDIR_SIZE
+
+ /* start after vmalloc area */
+ #define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+-#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
+-#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+-#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
++
++#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS)
++#define FIX_KMAP_BEGIN (0UL)
++#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1)
++
++#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
++
++/*
++ * This should be converted to the asm-generic version, but of course this
++ * is needlessly different from all other architectures. Sigh - tglx
++ */
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
+
+ /* start after fixmap area */
+ #define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+-#define PKMAP_SIZE PGDIR_SIZE
+ #define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
+ #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+ #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+@@ -29,11 +41,13 @@
+
+ extern void kmap_init(void);
+
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
++
+ static inline void flush_cache_kmaps(void)
+ {
+ flush_cache_all();
+ }
+-
+ #endif
+
+ #endif
+diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
+deleted file mode 100644
+index fecf7851ec32..000000000000
+--- a/arch/arc/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+- */
+-
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-/*
+- * We primarily need to define KM_TYPE_NR here but that in turn
+- * is a function of PGDIR_SIZE etc.
+- * To avoid circular deps issue, put everything in asm/highmem.h
+- */
+-#endif
+diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
+index 1b9f473c6369..c79912a6b196 100644
+--- a/arch/arc/mm/highmem.c
++++ b/arch/arc/mm/highmem.c
+@@ -36,9 +36,8 @@
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+- * slots across NR_CPUS would be more than sufficient (generic code defines
+- * KM_TYPE_NR as 20).
++ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
++ * CPU. So the number of CPUs sharing a single PTE page is limited.
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+@@ -47,48 +46,6 @@
+ */
+
+ extern pte_t * pkmap_page_table;
+-static pte_t * fixmap_page_table;
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- int idx, cpu_idx;
+- unsigned long vaddr;
+-
+- cpu_idx = kmap_atomic_idx_push();
+- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+- vaddr = FIXMAP_ADDR(idx);
+-
+- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+- mk_pte(page, prot));
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kv)
+-{
+- unsigned long kvaddr = (unsigned long)kv;
+-
+- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+-
+- /*
+- * Because preemption is disabled, this vaddr can be associated
+- * with the current allocated index.
+- * But in case of multiple live kmap_atomic(), it still relies on
+- * callers to unmap in right order.
+- */
+- int cpu_idx = kmap_atomic_idx();
+- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+-
+- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+-
+- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+-
+- kmap_atomic_idx_pop();
+- }
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+
+ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+ {
+@@ -108,10 +65,9 @@ void __init kmap_init(void)
+ {
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
++ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
++ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
+
+- BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+-
+- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
++ alloc_kmap_pgtable(FIXMAP_BASE);
+ }
+--
+2.43.0
+
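
For readers unfamiliar with the arithmetic criticised in the commit message above, here is a minimal stand-alone sketch (plain C, not kernel code; the 2M fixmap and 8K page size are taken from the quoted highmem.c comment, the CPU counts are made up for illustration) of how the removed per-CPU slot formula, KM_TYPE_NR = (FIXMAP_SIZE >> PAGE_SHIFT) / NR_CPUS, collapses as NR_CPUS grows:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values: a 2M fixmap (one PGDIR worth) and 8K pages,
             * matching the "2M kvaddr == 256 PTE slots" figure quoted above. */
            unsigned long fixmap_size = 2UL << 20;
            unsigned int  page_shift  = 13;
            unsigned long total_slots = fixmap_size >> page_shift;   /* 256 */
            unsigned int  nr_cpus[]   = { 2, 64, 512, 4096 };

            for (unsigned int i = 0; i < sizeof(nr_cpus) / sizeof(nr_cpus[0]); i++)
                    printf("NR_CPUS=%-4u -> %lu kmap slots per CPU\n",
                           nr_cpus[i], total_slots / nr_cpus[i]);

            /* From 512 CPUs upwards the integer division yields 0 slots per
             * CPU, so any kmap_atomic() nesting would break at runtime. */
            return 0;
    }

This is the failure mode the patch removes: instead of an NR_CPUS-dependent formula, each CPU gets the fixed KM_MAX_IDX slots, and a BUILD_BUG_ON() rejects configurations where FIX_KMAP_SLOTS no longer fits into one PTE page.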
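
A second, equally hedged sketch shows the per-CPU indexing idea behind the generic KMAP_LOCAL code selected here: each CPU owns a fixed run of KM_MAX_IDX slots, and nested kmap_local()/kmap_atomic() calls on that CPU use increasing depths within that run, which is why FIX_KMAP_SLOTS is defined above as KM_MAX_IDX * NR_CPUS. The constants and the helper name below are assumptions for a toy configuration, not the kernel implementation:

    #include <stdio.h>

    /* Toy model only -- not the kernel implementation. */
    #define KM_MAX_IDX   16              /* assumed per-CPU nesting depth */
    #define NR_CPUS      4               /* assumed small example config  */
    #define PAGE_SHIFT   13              /* assumed 8K pages              */
    #define FIXMAP_BASE  0x7f000000UL    /* placeholder base address      */

    /* Hypothetical helper: map (cpu, nesting depth) to a fixmap slot address. */
    static unsigned long kmap_slot_vaddr(unsigned int cpu, unsigned int depth)
    {
            unsigned int idx = cpu * KM_MAX_IDX + depth;   /* depth < KM_MAX_IDX */

            return FIXMAP_BASE + ((unsigned long)idx << PAGE_SHIFT);
    }

    int main(void)
    {
            /* CPU 1 takes three nested mappings: each gets its own slot. */
            for (unsigned int depth = 0; depth < 3; depth++)
                    printf("cpu 1, depth %u -> 0x%lx\n",
                           depth, kmap_slot_vaddr(1, depth));
            return 0;
    }

The real code derives the address through __fix_to_virt(), which on ARC counts down from FIXADDR_TOP as shown in the hunk above, but the slot numbering follows the same per-CPU scheme. With this layout the number of usable slots per CPU never depends on how many CPUs the kernel is configured for; only the total fixmap space consumed does, which the new BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE) checks at build time.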