author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/arm/mm/cache-xsc3l2.c
parent    Initial commit. (diff)

Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm/mm/cache-xsc3l2.c')
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c | 208
1 file changed, 208 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
new file mode 100644
index 000000000..d20d7af02
--- /dev/null
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
+ *
+ * Copyright (C) 2007 ARM Limited
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/cacheflush.h>
+
+#define CR_L2 (1 << 26)
+
+#define CACHE_LINE_SIZE 32
+#define CACHE_LINE_SHIFT 5
+#define CACHE_WAY_PER_SET 8
+
+#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
+#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
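+/*
+ * Worked example of the geometry decode: an l2ctype way-size field
+ * of 2 gives CACHE_WAY_SIZE = 8192 << 2 = 32 KiB per way and
+ * CACHE_SET_SIZE = 32768 >> 5 = 1024 sets; with 8 ways that is a
+ * 256 KiB L2 in total.
+ */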
+
+static inline int xsc3_l2_present(void)
+{
+ unsigned long l2ctype;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
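+ /* non-zero geometry bits indicate an L2 cache is fitted */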
+ return !!(l2ctype & 0xf8);
+}
+
+static inline void xsc3_l2_clean_mva(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
+}
+
+static inline void xsc3_l2_inv_mva(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
+}
+
+static inline void xsc3_l2_inv_all(void)
+{
+ unsigned long l2ctype, set_way;
+ int set, way;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
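+ /*
+ * The set/way argument encodes the way in bits [31:29] and the
+ * set index starting at bit 5 (the cache line shift).
+ */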
+ for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
+ for (way = 0; way < CACHE_WAY_PER_SET; way++) {
+ set_way = (way << 29) | (set << 5);
+ __asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
+ }
+ }
+
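+ /* wait for all outstanding set/way operations to complete */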
+ dsb();
+}
+
+static inline void l2_unmap_va(unsigned long va)
+{
+#ifdef CONFIG_HIGHMEM
+ if (va != -1)
+ kunmap_atomic((void *)va);
+#endif
+}
+
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long va = prev_va & PAGE_MASK;
+ unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
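+ /*
+ * The walk advances upwards, so the page offset only ever grows
+ * within a page; a smaller offset than last time means a page
+ * boundary was crossed.  The initial prev_va of -1 yields the
+ * largest possible offset and so forces the first mapping.
+ */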
+ if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
+ /*
+ * Switching to a new page.  Since the cache ops take
+ * virtual addresses only, a mapping for the new page
+ * must be put in place first.
+ */
+ l2_unmap_va(prev_va);
+ va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
+ }
+ return va + (pa_offset >> (32 - PAGE_SHIFT));
+#else
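+ /* lowmem is always reachable through the permanent linear mapping */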
+ return __phys_to_virt(pa);
+#endif
+}
+
+static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long vaddr;
+
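+ /* a range spanning the whole address space means: invalidate everything */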
+ if (start == 0 && end == -1ul) {
+ xsc3_l2_inv_all();
+ return;
+ }
+
+ vaddr = -1; /* to force the first mapping */
+
+ /*
+ * Clean and invalidate the partial first cache line: it may
+ * hold dirty data outside the range, which a plain invalidate
+ * would discard.
+ */
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
+ xsc3_l2_clean_mva(vaddr);
+ xsc3_l2_inv_mva(vaddr);
+ start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+ }
+
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+ while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
+ vaddr = l2_map_va(start, vaddr);
+ xsc3_l2_inv_mva(vaddr);
+ start += CACHE_LINE_SIZE;
+ }
+
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+ if (start < end) {
+ vaddr = l2_map_va(start, vaddr);
+ xsc3_l2_clean_mva(vaddr);
+ xsc3_l2_inv_mva(vaddr);
+ }
+
+ l2_unmap_va(vaddr);
+
+ dsb();
+}
+
+static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
+{
+ unsigned long vaddr;
+
+ vaddr = -1; /* to force the first mapping */
+
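+ /* rounding down is safe here: cleaning extra bytes never discards data */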
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ vaddr = l2_map_va(start, vaddr);
+ xsc3_l2_clean_mva(vaddr);
+ start += CACHE_LINE_SIZE;
+ }
+
+ l2_unmap_va(vaddr);
+
+ dsb();
+}
+
+/*
+ * Optimized flush of the entire L2 using set/way operations.
+ */
+static inline void xsc3_l2_flush_all(void)
+{
+ unsigned long l2ctype, set_way;
+ int set, way;
+
+ __asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
+
+ for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
+ for (way = 0; way < CACHE_WAY_PER_SET; way++) {
+ set_way = (way << 29) | (set << 5);
+ __asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
+ }
+ }
+
+ dsb();
+}
+
+static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
+{
+ unsigned long vaddr;
+
+ if (start == 0 && end == -1ul) {
+ xsc3_l2_flush_all();
+ return;
+ }
+
+ vaddr = -1; /* to force the first mapping */
+
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ vaddr = l2_map_va(start, vaddr);
+ xsc3_l2_clean_mva(vaddr);
+ xsc3_l2_inv_mva(vaddr);
+ start += CACHE_LINE_SIZE;
+ }
+
+ l2_unmap_va(vaddr);
+
+ dsb();
+}
+
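+/*
+ * Register the L2 maintenance routines as the outer cache
+ * operations: the outer_inv_range(), outer_clean_range() and
+ * outer_flush_range() helpers (see <asm/outercache.h>) then reach
+ * this L2 whenever DMA or coherency code asks for outer cache
+ * maintenance.
+ */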
+static int __init xsc3_l2_init(void)
+{
+ if (!cpu_is_xsc3() || !xsc3_l2_present())
+ return 0;
+
+ if (get_cr() & CR_L2) {
+ pr_info("XScale3 L2 cache enabled.\n");
+ xsc3_l2_inv_all();
+
+ outer_cache.inv_range = xsc3_l2_inv_range;
+ outer_cache.clean_range = xsc3_l2_clean_range;
+ outer_cache.flush_range = xsc3_l2_flush_range;
+ }
+
+ return 0;
+}
+core_initcall(xsc3_l2_init);