author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/sh/mm
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig            250
-rw-r--r--  arch/sh/mm/Makefile            45
-rw-r--r--  arch/sh/mm/alignment.c        189
-rw-r--r--  arch/sh/mm/asids-debugfs.c     59
-rw-r--r--  arch/sh/mm/cache-debugfs.c    109
-rw-r--r--  arch/sh/mm/cache-j2.c          64
-rw-r--r--  arch/sh/mm/cache-sh2.c         90
-rw-r--r--  arch/sh/mm/cache-sh2a.c       188
-rw-r--r--  arch/sh/mm/cache-sh3.c        102
-rw-r--r--  arch/sh/mm/cache-sh4.c        400
-rw-r--r--  arch/sh/mm/cache-sh7705.c     200
-rw-r--r--  arch/sh/mm/cache-shx3.c        44
-rw-r--r--  arch/sh/mm/cache.c            368
-rw-r--r--  arch/sh/mm/consistent.c        65
-rw-r--r--  arch/sh/mm/extable_32.c        24
-rw-r--r--  arch/sh/mm/fault.c            491
-rw-r--r--  arch/sh/mm/flush-sh4.c        111
-rw-r--r--  arch/sh/mm/hugetlbpage.c       82
-rw-r--r--  arch/sh/mm/init.c             425
-rw-r--r--  arch/sh/mm/ioremap.c          149
-rw-r--r--  arch/sh/mm/ioremap.h           23
-rw-r--r--  arch/sh/mm/ioremap_fixed.c    135
-rw-r--r--  arch/sh/mm/kmap.c              66
-rw-r--r--  arch/sh/mm/mmap.c             184
-rw-r--r--  arch/sh/mm/nommu.c             98
-rw-r--r--  arch/sh/mm/numa.c              56
-rw-r--r--  arch/sh/mm/pgtable.c           57
-rw-r--r--  arch/sh/mm/pmb.c              887
-rw-r--r--  arch/sh/mm/sram.c              35
-rw-r--r--  arch/sh/mm/tlb-debugfs.c      160
-rw-r--r--  arch/sh/mm/tlb-pteaex.c       106
-rw-r--r--  arch/sh/mm/tlb-sh3.c           95
-rw-r--r--  arch/sh/mm/tlb-sh4.c          108
-rw-r--r--  arch/sh/mm/tlb-urb.c           93
-rw-r--r--  arch/sh/mm/tlbex_32.c          82
-rw-r--r--  arch/sh/mm/tlbflush_32.c      137
-rw-r--r--  arch/sh/mm/uncached.c          44
37 files changed, 5821 insertions(+), 0 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
new file mode 100644
index 0000000000..511c17aede
--- /dev/null
+++ b/arch/sh/mm/Kconfig
@@ -0,0 +1,250 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Memory management options"
+
+config MMU
+ bool "Support for memory management hardware"
+ depends on !CPU_SH2
+ default y
+ help
+ Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
+ boot on these systems, this option must not be set.
+
+ On other systems (such as the SH-3 and 4) where an MMU exists,
+ turning this off will boot the kernel on these machines with the
+ MMU implicitly switched off.
+
+config PAGE_OFFSET
+ hex
+ default "0x80000000" if MMU
+ default "0x00000000"
+
+config ARCH_FORCE_MAX_ORDER
+ int "Order of maximal physically contiguous allocations"
+ default "8" if PAGE_SIZE_16KB
+ default "6" if PAGE_SIZE_64KB
+ default "13" if !MMU
+ default "10"
+ help
+ The kernel page allocator limits the size of maximal physically
+ contiguous allocations. The limit is called MAX_ORDER and it
+ defines the maximal power of two of number of pages that can be
+ allocated as a single contiguous block. This option allows
+ overriding the default setting when ability to allocate very
+ large blocks of physically contiguous memory is required.
+
+ The page size is not necessarily 4KB. Keep this in mind when
+ choosing a value for this option.
+
+ Don't change if unsure.
+
+config MEMORY_START
+ hex "Physical memory start address"
+ default "0x08000000"
+ help
+ Computers built with Hitachi SuperH processors always
+ map the ROM starting at address zero. But the processor
+ does not specify the range that RAM takes.
+
+ The physical memory (RAM) start address will be automatically
+ set to 0x08000000. Other platforms, such as the Solution Engine
+ boards, typically map RAM at 0x0C000000.
+
+ Tweak this only when porting to a new machine which does not
+ already have a defconfig. Changing it from the known correct
+ value on any of the known systems will only lead to disaster.
+
+config MEMORY_SIZE
+ hex "Physical memory size"
+ default "0x04000000"
+ help
+ This sets the default memory size assumed by your SH kernel. It can
+ be overridden as normal by the 'mem=' argument on the kernel command
+ line. If unsure, consult your board specifications or just leave it
+ as 0x04000000 which was the default value before this became
+ configurable.
+
+# Physical addressing modes
+
+config 29BIT
+ def_bool !32BIT
+ select UNCACHED_MAPPING
+
+config 32BIT
+ bool
+ default !MMU
+
+config PMB
+ bool "Support 32-bit physical addressing through PMB"
+ depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
+ select 32BIT
+ select UNCACHED_MAPPING
+ help
+ If you say Y here, physical addressing will be extended to
+ 32-bits through the SH-4A PMB. If this is not set, legacy
+ 29-bit physical addressing will be used.
+
+config X2TLB
+ def_bool y
+ depends on (CPU_SHX2 || CPU_SHX3) && MMU
+
+config VSYSCALL
+ bool "Support vsyscall page"
+ depends on MMU && (CPU_SH3 || CPU_SH4)
+ default y
+ help
+ This will enable support for the kernel mapping a vDSO page
+ in process space, and subsequently handing down the entry point
+ to the libc through the ELF auxiliary vector.
+
+ From the kernel side this is used for the signal trampoline.
+ For systems with an MMU that can afford to give up a page,
+ (the default value) say Y.
+
+config NUMA
+ bool "Non-Uniform Memory Access (NUMA) Support"
+ depends on MMU && SYS_SUPPORTS_NUMA
+ select ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ default n
+ help
+ Some SH systems have various memory blocks scattered around
+ the address space, each with varying latencies. This enables
+ support for these blocks by binding them to nodes and allowing
+ memory policies to be used for prioritizing and controlling
+ allocation behaviour.
+
+config NODES_SHIFT
+ int
+ default "3" if CPU_SUBTYPE_SHX3
+ default "1"
+ depends on NUMA
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on !NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ select SPARSEMEM_STATIC
+
+config ARCH_SPARSEMEM_DEFAULT
+ def_bool y
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+
+config IOREMAP_FIXED
+ def_bool y
+ depends on X2TLB
+
+config UNCACHED_MAPPING
+ bool
+
+config HAVE_SRAM_POOL
+ bool
+ select GENERIC_ALLOCATOR
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+ bool "4kB"
+ help
+ This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+ bool "8kB"
+ depends on !MMU || X2TLB
+ help
+ This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_16KB
+ bool "16kB"
+ depends on !MMU
+ help
+ This enables 16kB pages on MMU-less SH systems.
+
+config PAGE_SIZE_64KB
+ bool "64kB"
+ depends on !MMU || CPU_SH4
+ help
+ This enables support for 64kB pages, possible on all SH-4
+ CPUs and later.
+
+endchoice
+
+choice
+ prompt "HugeTLB page size"
+ depends on HUGETLB_PAGE
+ default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB
+ default HUGETLB_PAGE_SIZE_64K
+
+config HUGETLB_PAGE_SIZE_64K
+ bool "64kB"
+ depends on !PAGE_SIZE_64KB
+
+config HUGETLB_PAGE_SIZE_256K
+ bool "256kB"
+ depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_1MB
+ bool "1MB"
+
+config HUGETLB_PAGE_SIZE_4MB
+ bool "4MB"
+ depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+ bool "64MB"
+ depends on X2TLB
+
+endchoice
+
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+endmenu
+
+menu "Cache configuration"
+
+config SH7705_CACHE_32KB
+ bool "Enable 32KB cache size for SH7705"
+ depends on CPU_SUBTYPE_SH7705
+ default y
+
+choice
+ prompt "Cache mode"
+ default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
+ default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+
+config CACHE_WRITEBACK
+ bool "Write-back"
+
+config CACHE_WRITETHROUGH
+ bool "Write-through"
+ help
+ Selecting this option will configure the caches in write-through
+ mode, as opposed to the default write-back configuration.
+
+ Since there are still some aliasing issues on SH-4, this option will
+ unfortunately still require the majority of flushing functions to
+ be implemented to deal with aliasing.
+
+ If unsure, say N.
+
+config CACHE_OFF
+ bool "Off"
+
+endchoice
+
+endmenu
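
The ARCH_FORCE_MAX_ORDER entry above describes the limit as a power of two of pages, with the page size itself configurable. As a worked example of that arithmetic, here is a minimal stand-alone C sketch that reproduces the defaults listed in the entry, assuming the order counts 2^order pages as the help text states:

#include <stdio.h>

/* Largest physically contiguous allocation for a given page size and order:
 * 2^order pages, i.e. page_size << order. */
static unsigned long max_block(unsigned long page_size, unsigned int order)
{
        return page_size << order;
}

int main(void)
{
        /* Defaults from the ARCH_FORCE_MAX_ORDER entry above. */
        printf("4 KiB pages,  order 10: %lu KiB\n", max_block(4096, 10) >> 10);  /* 4096 KiB */
        printf("16 KiB pages, order 8:  %lu KiB\n", max_block(16384, 8) >> 10);  /* 4096 KiB */
        printf("64 KiB pages, order 6:  %lu KiB\n", max_block(65536, 6) >> 10);  /* 4096 KiB */
        /* No-MMU default. */
        printf("4 KiB pages,  order 13: %lu KiB\n", max_block(4096, 13) >> 10);  /* 32768 KiB */
        return 0;
}

With these defaults the MMU configurations all land on the same 4 MiB ceiling regardless of page size; only the no-MMU case raises it to 32 MiB.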
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
new file mode 100644
index 0000000000..f69ddc70b1
--- /dev/null
+++ b/arch/sh/mm/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux SuperH-specific parts of the memory manager.
+#
+
+obj-y := alignment.o cache.o init.o consistent.o mmap.o
+
+cacheops-$(CONFIG_CPU_J2) := cache-j2.o
+cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
+cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
+cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
+cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
+cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
+
+obj-y += $(cacheops-y)
+
+mmu-y := nommu.o extable_32.o
+mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
+ pgtable.o tlbex_32.o tlbflush_32.o
+
+obj-y += $(mmu-y)
+
+debugfs-y := asids-debugfs.o
+ifndef CONFIG_CACHE_OFF
+debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
+endif
+
+ifdef CONFIG_MMU
+debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
+tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
+tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
+obj-y += $(tlb-y)
+endif
+
+obj-$(CONFIG_DEBUG_FS) += $(debugfs-y)
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_PMB) += pmb.o
+obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
+obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
+obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
+
+GCOV_PROFILE_pmb.o := n
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 0000000000..3a76a766f4
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,189 @@
+/*
+ * Alignment access counters and corresponding user-space interfaces.
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <asm/alignment.h>
+#include <asm/processor.h>
+
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+ valid! */
+static int se_usermode = UM_WARN | UM_FIXUP;
+/* 0: no warning 1: print a warning message, disabled by default */
+static int se_kernmode_warn;
+
+core_param(alignment, se_usermode, int, 0600);
+
+void inc_unaligned_byte_access(void)
+{
+ se_half++;
+}
+
+void inc_unaligned_word_access(void)
+{
+ se_word++;
+}
+
+void inc_unaligned_dword_access(void)
+{
+ se_dword++;
+}
+
+void inc_unaligned_multi_access(void)
+{
+ se_multi++;
+}
+
+void inc_unaligned_user_access(void)
+{
+ se_user++;
+}
+
+void inc_unaligned_kernel_access(void)
+{
+ se_sys++;
+}
+
+/*
+ * This defaults to the global policy which can be set from the command
+ * line, while processes can overload their preferences via prctl().
+ */
+unsigned int unaligned_user_action(void)
+{
+ unsigned int action = se_usermode;
+
+ if (current->thread.flags & SH_THREAD_UAC_SIGBUS) {
+ action &= ~UM_FIXUP;
+ action |= UM_SIGNAL;
+ }
+
+ if (current->thread.flags & SH_THREAD_UAC_NOPRINT)
+ action &= ~UM_WARN;
+
+ return action;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
+{
+ return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
+ (unsigned int __user *)addr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
+ (val & SH_THREAD_UAC_MASK);
+ return 0;
+}
+
+void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
+ struct pt_regs *regs)
+{
+ if (user_mode(regs) && (se_usermode & UM_WARN))
+ pr_notice_ratelimited("Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ tsk->comm, task_pid_nr(tsk),
+ (void *)instruction_pointer(regs), insn);
+ else if (se_kernmode_warn)
+ pr_notice_ratelimited("Fixing up unaligned kernel access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ tsk->comm, task_pid_nr(tsk),
+ (void *)instruction_pointer(regs), insn);
+}
+
+static const char *se_usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
+static int alignment_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "User:\t\t%lu\n", se_user);
+ seq_printf(m, "System:\t\t%lu\n", se_sys);
+ seq_printf(m, "Half:\t\t%lu\n", se_half);
+ seq_printf(m, "Word:\t\t%lu\n", se_word);
+ seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
+ se_usermode_action[se_usermode]);
+ seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+ se_kernmode_warn ? "+warn" : "");
+ return 0;
+}
+
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
+}
+
+static ssize_t alignment_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ int *data = pde_data(file_inode(file));
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '5')
+ *data = mode - '0';
+ }
+ return count;
+}
+
+static const struct proc_ops alignment_proc_ops = {
+ .proc_open = alignment_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = alignment_proc_write,
+};
+
+/*
+ * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+ struct proc_dir_entry *dir, *res;
+
+ dir = proc_mkdir("cpu", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_ops, &se_usermode);
+ if (!res)
+ return -ENOMEM;
+
+ res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_ops, &se_kernmode_warn);
+ if (!res)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(alignment_init);
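
The alignment.c code above exposes the global counters and policy through /proc/cpu/alignment (writing a digit 0-5 selects one of the se_usermode_action policies) and lets individual tasks override the policy via get_unalign_ctl()/set_unalign_ctl(), which the generic prctl() syscall reaches as PR_GET_UNALIGN/PR_SET_UNALIGN. A minimal user-space sketch of the per-task side, illustrative only, which requests SIGBUS delivery instead of in-kernel fixup:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        unsigned int cur = 0;

        /* Read the current per-task unaligned-access control word. */
        if (prctl(PR_GET_UNALIGN, &cur, 0, 0, 0) == 0)
                printf("current unalign control: %#x\n", cur);

        /* Deliver SIGBUS on unaligned accesses instead of fixing them up. */
        if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0)
                perror("PR_SET_UNALIGN");

        return 0;
}

Writing '4' to /proc/cpu/alignment applies the equivalent "signal" policy system-wide, per the se_usermode_action table above.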
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
new file mode 100644
index 0000000000..d16d6f5ec7
--- /dev/null
+++ b/arch/sh/mm/asids-debugfs.c
@@ -0,0 +1,59 @@
+/*
+ * debugfs ops for process ASIDs
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * Provides a debugfs file that lists out the ASIDs currently associated
+ * with the processes.
+ *
+ * In the SH-5 case, if the DM.PC register is examined through the debug
+ * link, this shows ASID + PC. To make use of this, the PID->ASID
+ * relationship needs to be known. This is primarily for debugging.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+
+static int asids_debugfs_show(struct seq_file *file, void *iter)
+{
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+
+ for_each_process(p) {
+ int pid = p->pid;
+
+ if (unlikely(!pid))
+ continue;
+
+ if (p->mm)
+ seq_printf(file, "%5d : %04lx\n", pid,
+ cpu_asid(smp_processor_id(), p->mm));
+ }
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
+
+static int __init asids_debugfs_init(void)
+{
+ debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir, NULL,
+ &asids_debugfs_fops);
+ return 0;
+}
+device_initcall(asids_debugfs_init);
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
new file mode 100644
index 0000000000..b0f185169d
--- /dev/null
+++ b/arch/sh/mm/cache-debugfs.c
@@ -0,0 +1,109 @@
+/*
+ * debugfs ops for the L1 cache
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <linux/uaccess.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+
+enum cache_type {
+ CACHE_TYPE_ICACHE,
+ CACHE_TYPE_DCACHE,
+ CACHE_TYPE_UNIFIED,
+};
+
+static int cache_debugfs_show(struct seq_file *file, void *iter)
+{
+ unsigned int cache_type = (unsigned int)file->private;
+ struct cache_info *cache;
+ unsigned int waysize, way;
+ unsigned long ccr;
+ unsigned long addrstart = 0;
+
+ /*
+ * Go uncached immediately so we don't skew the results any
+ * more than we already are..
+ */
+ jump_to_uncached();
+
+ ccr = __raw_readl(SH_CCR);
+ if ((ccr & CCR_CACHE_ENABLE) == 0) {
+ back_to_cached();
+
+ seq_printf(file, "disabled\n");
+ return 0;
+ }
+
+ if (cache_type == CACHE_TYPE_DCACHE) {
+ addrstart = CACHE_OC_ADDRESS_ARRAY;
+ cache = &current_cpu_data.dcache;
+ } else {
+ addrstart = CACHE_IC_ADDRESS_ARRAY;
+ cache = &current_cpu_data.icache;
+ }
+
+ waysize = cache->sets;
+
+ /*
+ * If the OC is already in RAM mode, we only have
+ * half of the entries to consider..
+ */
+ if ((ccr & CCR_CACHE_ORA) && cache_type == CACHE_TYPE_DCACHE)
+ waysize >>= 1;
+
+ waysize <<= cache->entry_shift;
+
+ for (way = 0; way < cache->ways; way++) {
+ unsigned long addr;
+ unsigned int line;
+
+ seq_printf(file, "-----------------------------------------\n");
+ seq_printf(file, "Way %d\n", way);
+ seq_printf(file, "-----------------------------------------\n");
+
+ for (addr = addrstart, line = 0;
+ addr < addrstart + waysize;
+ addr += cache->linesz, line++) {
+ unsigned long data = __raw_readl(addr);
+
+ /* Check the V bit, ignore invalid cachelines */
+ if ((data & 1) == 0)
+ continue;
+
+ /* U: Dirty, cache tag is 10 bits up */
+ seq_printf(file, "%3d: %c 0x%lx\n",
+ line, data & 2 ? 'U' : ' ',
+ data & 0x1ffffc00);
+ }
+
+ addrstart += cache->way_incr;
+ }
+
+ back_to_cached();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
+
+static int __init cache_debugfs_init(void)
+{
+ debugfs_create_file("dcache", S_IRUSR, arch_debugfs_dir,
+ (void *)CACHE_TYPE_DCACHE, &cache_debugfs_fops);
+ debugfs_create_file("icache", S_IRUSR, arch_debugfs_dir,
+ (void *)CACHE_TYPE_ICACHE, &cache_debugfs_fops);
+ return 0;
+}
+module_init(cache_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/mm/cache-j2.c b/arch/sh/mm/cache-j2.c
new file mode 100644
index 0000000000..9ac9602143
--- /dev/null
+++ b/arch/sh/mm/cache-j2.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/cache-j2.c
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/cpumask.h>
+
+#include <asm/cache.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+#define ICACHE_ENABLE 0x1
+#define DCACHE_ENABLE 0x2
+#define CACHE_ENABLE (ICACHE_ENABLE | DCACHE_ENABLE)
+#define ICACHE_FLUSH 0x100
+#define DCACHE_FLUSH 0x200
+#define CACHE_FLUSH (ICACHE_FLUSH | DCACHE_FLUSH)
+
+u32 __iomem *j2_ccr_base;
+
+static void j2_flush_icache(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | ICACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+static void j2_flush_dcache(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | DCACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+static void j2_flush_both(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | CACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+void __init j2_cache_init(void)
+{
+ if (!j2_ccr_base)
+ return;
+
+ local_flush_cache_all = j2_flush_both;
+ local_flush_cache_mm = j2_flush_both;
+ local_flush_cache_dup_mm = j2_flush_both;
+ local_flush_cache_page = j2_flush_both;
+ local_flush_cache_range = j2_flush_both;
+ local_flush_dcache_folio = j2_flush_dcache;
+ local_flush_icache_range = j2_flush_icache;
+ local_flush_icache_folio = j2_flush_icache;
+ local_flush_cache_sigtramp = j2_flush_icache;
+
+ pr_info("Initial J2 CCR is %.8x\n", __raw_readl(j2_ccr_base));
+}
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
new file mode 100644
index 0000000000..f2b6cdbdf9
--- /dev/null
+++ b/arch/sh/mm/cache-sh2.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/cache-sh2.c
+ *
+ * Copyright (C) 2002 Paul Mundt
+ * Copyright (C) 2008 Yoshinori Sato
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+static void sh2__flush_wback_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
+ int way;
+ for (way = 0; way < 4; way++) {
+ unsigned long data = __raw_readl(addr | (way << 12));
+ if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ __raw_writel(data, addr | (way << 12));
+ }
+ }
+ }
+}
+
+static void sh2__flush_purge_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES)
+ __raw_writel((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
+}
+
+static void sh2__flush_invalidate_region(void *start, int size)
+{
+#ifdef CONFIG_CACHE_WRITEBACK
+ /*
+ * SH-2 does not support individual line invalidation, only a
+ * global invalidate.
+ */
+ unsigned long ccr;
+ unsigned long flags;
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ ccr = __raw_readl(SH_CCR);
+ ccr |= CCR_CACHE_INVALIDATE;
+ __raw_writel(ccr, SH_CCR);
+
+ back_to_cached();
+ local_irq_restore(flags);
+#else
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES)
+ __raw_writel((v & CACHE_PHYSADDR_MASK),
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
+#endif
+}
+
+void __init sh2_cache_init(void)
+{
+ __flush_wback_region = sh2__flush_wback_region;
+ __flush_purge_region = sh2__flush_purge_region;
+ __flush_invalidate_region = sh2__flush_invalidate_region;
+}
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
new file mode 100644
index 0000000000..4f7739e707
--- /dev/null
+++ b/arch/sh/mm/cache-sh2a.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/cache-sh2a.c
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+/*
+ * The maximum number of pages we support when doing ranged dcache
+ * flushing. Anything exceeding this will simply flush the dcache in its
+ * entirety.
+ */
+#define MAX_OCACHE_PAGES 32
+#define MAX_ICACHE_PAGES 32
+
+#ifdef CONFIG_CACHE_WRITEBACK
+static void sh2a_flush_oc_line(unsigned long v, int way)
+{
+ unsigned long addr = (v & 0x000007f0) | (way << 11);
+ unsigned long data;
+
+ data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr);
+ if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr);
+ }
+}
+#endif
+
+static void sh2a_invalidate_line(unsigned long cache_addr, unsigned long v)
+{
+ /* Set associative bit to hit all ways */
+ unsigned long addr = (v & 0x000007f0) | SH_CACHE_ASSOC;
+ __raw_writel((addr & CACHE_PHYSADDR_MASK), cache_addr | addr);
+}
+
+/*
+ * Write back the dirty D-caches, but do not invalidate them.
+ */
+static void sh2a__flush_wback_region(void *start, int size)
+{
+#ifdef CONFIG_CACHE_WRITEBACK
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+ int nr_ways;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ nr_ways = current_cpu_data.dcache.ways;
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ /* If there are too many pages then flush the entire cache */
+ if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
+ begin = CACHE_OC_ADDRESS_ARRAY;
+ end = begin + (nr_ways * current_cpu_data.dcache.way_size);
+
+ for (v = begin; v < end; v += L1_CACHE_BYTES) {
+ unsigned long data = __raw_readl(v);
+ if (data & SH_CACHE_UPDATED)
+ __raw_writel(data & ~SH_CACHE_UPDATED, v);
+ }
+ } else {
+ int way;
+ for (way = 0; way < nr_ways; way++) {
+ for (v = begin; v < end; v += L1_CACHE_BYTES)
+ sh2a_flush_oc_line(v, way);
+ }
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Write back the dirty D-caches and invalidate them.
+ */
+static void sh2a__flush_purge_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+#ifdef CONFIG_CACHE_WRITEBACK
+ int way;
+ int nr_ways = current_cpu_data.dcache.ways;
+ for (way = 0; way < nr_ways; way++)
+ sh2a_flush_oc_line(v, way);
+#endif
+ sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+/*
+ * Invalidate the D-caches, but no write back please
+ */
+static void sh2a__flush_invalidate_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ /* If there are too many pages then just blow the cache */
+ if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
+ __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+ SH_CCR);
+ } else {
+ for (v = begin; v < end; v += L1_CACHE_BYTES)
+ sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+/*
+ * Write back the range of D-cache, and purge the I-cache.
+ */
+static void sh2a_flush_icache_range(void *args)
+{
+ struct flusher_data *data = args;
+ unsigned long start, end;
+ unsigned long v;
+ unsigned long flags;
+
+ start = data->addr1 & ~(L1_CACHE_BYTES-1);
+ end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
+
+#ifdef CONFIG_CACHE_WRITEBACK
+ sh2a__flush_wback_region((void *)start, end-start);
+#endif
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ /* I-Cache invalidate */
+ /* If there are too many pages then just blow the cache */
+ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+ __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+ SH_CCR);
+ } else {
+ for (v = start; v < end; v += L1_CACHE_BYTES)
+ sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+void __init sh2a_cache_init(void)
+{
+ local_flush_icache_range = sh2a_flush_icache_range;
+
+ __flush_wback_region = sh2a__flush_wback_region;
+ __flush_purge_region = sh2a__flush_purge_region;
+ __flush_invalidate_region = sh2a__flush_invalidate_region;
+}
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
new file mode 100644
index 0000000000..bc595982d3
--- /dev/null
+++ b/arch/sh/mm/cache-sh3.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/cache-sh3.c
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 Paul Mundt
+ */
+
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/threads.h>
+#include <asm/addrspace.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Write back the dirty D-caches, but do not invalidate them.
+ *
+ * Is this really worth it, or should we just alias this routine
+ * to __flush_purge_region too?
+ *
+ * START: Virtual Address (U0, P1, or P3)
+ * SIZE: Size of the region.
+ */
+
+static void sh3__flush_wback_region(void *start, int size)
+{
+ unsigned long v, j;
+ unsigned long begin, end;
+ unsigned long flags;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
+ for (j = 0; j < current_cpu_data.dcache.ways; j++) {
+ unsigned long data, addr, p;
+
+ p = __pa(v);
+ addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
+ local_irq_save(flags);
+ data = __raw_readl(addr);
+
+ if ((data & CACHE_PHYSADDR_MASK) ==
+ (p & CACHE_PHYSADDR_MASK)) {
+ data &= ~SH_CACHE_UPDATED;
+ __raw_writel(data, addr);
+ local_irq_restore(flags);
+ break;
+ }
+ local_irq_restore(flags);
+ addrstart += current_cpu_data.dcache.way_incr;
+ }
+ }
+}
+
+/*
+ * Write back the dirty D-caches and invalidate them.
+ *
+ * START: Virtual Address (U0, P1, or P3)
+ * SIZE: Size of the region.
+ */
+static void sh3__flush_purge_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ unsigned long data, addr;
+
+ data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
+ addr = CACHE_OC_ADDRESS_ARRAY |
+ (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
+ __raw_writel(data, addr);
+ }
+}
+
+void __init sh3_cache_init(void)
+{
+ __flush_wback_region = sh3__flush_wback_region;
+ __flush_purge_region = sh3__flush_purge_region;
+
+ /*
+ * No write back please
+ *
+ * Except I don't think there's any way to avoid the writeback.
+ * So we just alias it to sh3__flush_purge_region(). dwmw2.
+ */
+ __flush_invalidate_region = sh3__flush_purge_region;
+}
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
new file mode 100644
index 0000000000..862046f269
--- /dev/null
+++ b/arch/sh/mm/cache-sh4.c
@@ -0,0 +1,400 @@
+/*
+ * arch/sh/mm/cache-sh4.c
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2001 - 2009 Paul Mundt
+ * Copyright (C) 2003 Richard Curnow
+ * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
+#include <asm/cacheflush.h>
+
+/*
+ * The maximum number of pages we support when doing ranged dcache
+ * flushing. Anything exceeding this will simply flush the dcache in its
+ * entirety.
+ */
+#define MAX_ICACHE_PAGES 32
+
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
+ unsigned long exec_offset);
+
+/*
+ * Write back the range of D-cache, and purge the I-cache.
+ *
+ * Called from kernel/module.c:sys_init_module and routine for a.out format,
+ * signal handler code and kprobes code
+ */
+static void sh4_flush_icache_range(void *args)
+{
+ struct flusher_data *data = args;
+ unsigned long start, end;
+ unsigned long flags, v;
+ int i;
+
+ start = data->addr1;
+ end = data->addr2;
+
+ /* If there are too many pages then just blow away the caches */
+ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+ local_flush_cache_all(NULL);
+ return;
+ }
+
+ /*
+ * Selectively flush d-cache then invalidate the i-cache.
+ * This is inefficient, so only use this for small ranges.
+ */
+ start &= ~(L1_CACHE_BYTES-1);
+ end += L1_CACHE_BYTES-1;
+ end &= ~(L1_CACHE_BYTES-1);
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ for (v = start; v < end; v += L1_CACHE_BYTES) {
+ unsigned long icacheaddr;
+ int j, n;
+
+ __ocbwb(v);
+
+ icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
+ cpu_data->icache.entry_mask);
+
+ /* Clear i-cache line valid-bit */
+ n = boot_cpu_data.icache.n_aliases;
+ for (i = 0; i < cpu_data->icache.ways; i++) {
+ for (j = 0; j < n; j++)
+ __raw_writel(0, icacheaddr + (j * PAGE_SIZE));
+ icacheaddr += cpu_data->icache.way_incr;
+ }
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
+{
+ unsigned long flags, exec_offset = 0;
+
+ /*
+ * All types of SH-4 require PC to be uncached to operate on the I-cache.
+ * Some types of SH-4 require PC to be uncached to operate on the D-cache.
+ */
+ if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
+ (start < CACHE_OC_ADDRESS_ARRAY))
+ exec_offset = cached_to_uncached;
+
+ local_irq_save(flags);
+ __flush_cache_one(start, phys, exec_offset);
+ local_irq_restore(flags);
+}
+
+/*
+ * Write back & invalidate the D-cache of the page.
+ * (To avoid "alias" issues)
+ */
+static void sh4_flush_dcache_folio(void *arg)
+{
+ struct folio *folio = arg;
+#ifndef CONFIG_SMP
+ struct address_space *mapping = folio_flush_mapping(folio);
+
+ if (mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &folio->flags);
+ else
+#endif
+ {
+ unsigned long pfn = folio_pfn(folio);
+ unsigned long addr = (unsigned long)folio_address(folio);
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ (addr & shm_align_mask),
+ pfn * PAGE_SIZE);
+ addr += PAGE_SIZE;
+ pfn++;
+ }
+ }
+
+ wmb();
+}
+
+/* TODO: Selective icache invalidation through IC address array.. */
+static void flush_icache_all(void)
+{
+ unsigned long flags, ccr;
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ /* Flush I-cache */
+ ccr = __raw_readl(SH_CCR);
+ ccr |= CCR_CACHE_ICI;
+ __raw_writel(ccr, SH_CCR);
+
+ /*
+ * back_to_cached() will take care of the barrier for us, don't add
+ * another one!
+ */
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+static void flush_dcache_all(void)
+{
+ unsigned long addr, end_addr, entry_offset;
+
+ end_addr = CACHE_OC_ADDRESS_ARRAY +
+ (current_cpu_data.dcache.sets <<
+ current_cpu_data.dcache.entry_shift) *
+ current_cpu_data.dcache.ways;
+
+ entry_offset = 1 << current_cpu_data.dcache.entry_shift;
+
+ for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ __raw_writel(0, addr); addr += entry_offset;
+ }
+}
+
+static void sh4_flush_cache_all(void *unused)
+{
+ flush_dcache_all();
+ flush_icache_all();
+}
+
+/*
+ * Note : (RPC) since the caches are physically tagged, the only point
+ * of flush_cache_mm for SH-4 is to get rid of aliases from the
+ * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
+ * lines can stay resident so long as the virtual address they were
+ * accessed with (hence cache set) is in accord with the physical
+ * address (i.e. tag). It's no different here.
+ *
+ * Caller takes mm->mmap_lock.
+ */
+static void sh4_flush_cache_mm(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
+ return;
+
+ flush_dcache_all();
+}
+
+/*
+ * Write back and invalidate I/D-caches for the page.
+ *
+ * ADDR: Virtual Address (U0 address)
+ * PFN: Physical page number
+ */
+static void sh4_flush_cache_page(void *args)
+{
+ struct flusher_data *data = args;
+ struct vm_area_struct *vma;
+ struct page *page;
+ unsigned long address, pfn, phys;
+ int map_coherent = 0;
+ pmd_t *pmd;
+ pte_t *pte;
+ void *vaddr;
+
+ vma = data->vma;
+ address = data->addr1 & PAGE_MASK;
+ pfn = data->addr2;
+ phys = pfn << PAGE_SHIFT;
+ page = pfn_to_page(pfn);
+
+ if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+ return;
+
+ pmd = pmd_off(vma->vm_mm, address);
+ pte = pte_offset_kernel(pmd, address);
+
+ /* If the page isn't present, there is nothing to do here. */
+ if (!(pte_val(*pte) & _PAGE_PRESENT))
+ return;
+
+ if ((vma->vm_mm == current->active_mm))
+ vaddr = NULL;
+ else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (current_cpu_data.dcache.n_aliases &&
+ test_bit(PG_dcache_clean, &page->flags) &&
+ page_mapcount(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, address);
+ else
+ vaddr = kmap_atomic(page);
+
+ address = (unsigned long)vaddr;
+ }
+
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ (address & shm_align_mask), phys);
+
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_all();
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent(vaddr);
+ else
+ kunmap_atomic(vaddr);
+ }
+}
+
+/*
+ * Write back and invalidate D-caches.
+ *
+ * START, END: Virtual Address (U0 address)
+ *
+ * NOTE: We need to flush the _physical_ page entry.
+ * Flushing the cache lines for U0 only isn't enough.
+ * We need to flush for P1 too, which may contain aliases.
+ */
+static void sh4_flush_cache_range(void *args)
+{
+ struct flusher_data *data = args;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+
+ vma = data->vma;
+ start = data->addr1;
+ end = data->addr2;
+
+ if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+ return;
+
+ /*
+ * If cache is only 4k-per-way, there are never any 'aliases'. Since
+ * the cache is physically tagged, the data can just be left in there.
+ */
+ if (boot_cpu_data.dcache.n_aliases == 0)
+ return;
+
+ flush_dcache_all();
+
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_all();
+}
+
+/**
+ * __flush_cache_one
+ *
+ * @addr: address in memory mapped cache array
+ * @phys: P1 address to flush (has to match tags if addr has 'A' bit
+ * set i.e. associative write)
+ * @exec_offset: set to 0x20000000 if flush has to be executed from P2
+ * region else 0x0
+ *
+ * The offset into the cache array implied by 'addr' selects the
+ * 'colour' of the virtual address range that will be flushed. The
+ * operation (purge/write-back) is selected by the lower 2 bits of
+ * 'phys'.
+ */
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
+ unsigned long exec_offset)
+{
+ int way_count;
+ unsigned long base_addr = addr;
+ struct cache_info *dcache;
+ unsigned long way_incr;
+ unsigned long a, ea, p;
+ unsigned long temp_pc;
+
+ dcache = &boot_cpu_data.dcache;
+ /* Write this way for better assembly. */
+ way_count = dcache->ways;
+ way_incr = dcache->way_incr;
+
+ /*
+ * Apply exec_offset (i.e. branch to P2 if required.).
+ *
+ * FIXME:
+ *
+ * If I write "=r" for the (temp_pc), it puts this in r6 hence
+ * trashing exec_offset before it's been added on - why? Hence
+ * "=&r" as a 'workaround'
+ */
+ asm volatile("mov.l 1f, %0\n\t"
+ "add %1, %0\n\t"
+ "jmp @%0\n\t"
+ "nop\n\t"
+ ".balign 4\n\t"
+ "1: .long 2f\n\t"
+ "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
+
+ /*
+ * We know there will be >=1 iteration, so write as do-while to avoid
+ * pointless head-of-loop check for 0 iterations.
+ */
+ do {
+ ea = base_addr + PAGE_SIZE;
+ a = base_addr;
+ p = phys;
+
+ do {
+ *(volatile unsigned long *)a = p;
+ /*
+ * Next line: intentionally not p+32, saves an add, p
+ * will do since only the cache tag bits need to
+ * match.
+ */
+ *(volatile unsigned long *)(a+32) = p;
+ a += 64;
+ p += 64;
+ } while (a < ea);
+
+ base_addr += way_incr;
+ } while (--way_count != 0);
+}
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+ printk("PVR=%08x CVR=%08x PRR=%08x\n",
+ __raw_readl(CCN_PVR),
+ __raw_readl(CCN_CVR),
+ __raw_readl(CCN_PRR));
+
+ local_flush_icache_range = sh4_flush_icache_range;
+ local_flush_dcache_folio = sh4_flush_dcache_folio;
+ local_flush_cache_all = sh4_flush_cache_all;
+ local_flush_cache_mm = sh4_flush_cache_mm;
+ local_flush_cache_dup_mm = sh4_flush_cache_mm;
+ local_flush_cache_page = sh4_flush_cache_page;
+ local_flush_cache_range = sh4_flush_cache_range;
+
+ sh4__flush_region_init();
+}
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
new file mode 100644
index 0000000000..b509a40758
--- /dev/null
+++ b/arch/sh/mm/cache-sh7705.c
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/mm/cache-sh7705.c
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2004 Alex Song
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/threads.h>
+#include <asm/addrspace.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+/*
+ * The 32KB cache on the SH7705 suffers from the same synonym problem
+ * as SH4 CPUs
+ */
+static inline void cache_wback_all(void)
+{
+ unsigned long ways, waysize, addrstart;
+
+ ways = current_cpu_data.dcache.ways;
+ waysize = current_cpu_data.dcache.sets;
+ waysize <<= current_cpu_data.dcache.entry_shift;
+
+ addrstart = CACHE_OC_ADDRESS_ARRAY;
+
+ do {
+ unsigned long addr;
+
+ for (addr = addrstart;
+ addr < addrstart + waysize;
+ addr += current_cpu_data.dcache.linesz) {
+ unsigned long data;
+ int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
+
+ data = __raw_readl(addr);
+
+ if ((data & v) == v)
+ __raw_writel(data & ~v, addr);
+
+ }
+
+ addrstart += current_cpu_data.dcache.way_incr;
+ } while (--ways);
+}
+
+/*
+ * Write back the range of D-cache, and purge the I-cache.
+ *
+ * Called from kernel/module.c:sys_init_module and routine for a.out format.
+ */
+static void sh7705_flush_icache_range(void *args)
+{
+ struct flusher_data *data = args;
+ unsigned long start, end;
+
+ start = data->addr1;
+ end = data->addr2;
+
+ __flush_wback_region((void *)start, end - start);
+}
+
+/*
+ * Writeback&Invalidate the D-cache of the page
+ */
+static void __flush_dcache_page(unsigned long phys)
+{
+ unsigned long ways, waysize, addrstart;
+ unsigned long flags;
+
+ phys |= SH_CACHE_VALID;
+
+ /*
+ * Here, phys is the physical address of the page. We check all the
+ * tags in the cache for those with the same page number as this page
+ * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
+ * derived from the offset within the 4k page). Matching valid
+ * entries are invalidated.
+ *
+ * Since 2 bits of the cache index are derived from the virtual page
+ * number, knowing this would reduce the number of cache entries to be
+ * searched by a factor of 4. However this function exists to deal with
+ * potential cache aliasing, therefore the optimisation is probably not
+ * possible.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ ways = current_cpu_data.dcache.ways;
+ waysize = current_cpu_data.dcache.sets;
+ waysize <<= current_cpu_data.dcache.entry_shift;
+
+ addrstart = CACHE_OC_ADDRESS_ARRAY;
+
+ do {
+ unsigned long addr;
+
+ for (addr = addrstart;
+ addr < addrstart + waysize;
+ addr += current_cpu_data.dcache.linesz) {
+ unsigned long data;
+
+ data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
+ if (data == phys) {
+ data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
+ __raw_writel(data, addr);
+ }
+ }
+
+ addrstart += current_cpu_data.dcache.way_incr;
+ } while (--ways);
+
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+/*
+ * Write back & invalidate the D-cache of the page.
+ * (To avoid "alias" issues)
+ */
+static void sh7705_flush_dcache_folio(void *arg)
+{
+ struct folio *folio = arg;
+ struct address_space *mapping = folio_flush_mapping(folio);
+
+ if (mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &folio->flags);
+ else {
+ unsigned long pfn = folio_pfn(folio);
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++)
+ __flush_dcache_page((pfn + i) * PAGE_SIZE);
+ }
+}
+
+static void sh7705_flush_cache_all(void *args)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ cache_wback_all();
+ back_to_cached();
+ local_irq_restore(flags);
+}
+
+/*
+ * Write back and invalidate I/D-caches for the page.
+ *
+ * ADDRESS: Virtual Address (U0 address)
+ */
+static void sh7705_flush_cache_page(void *args)
+{
+ struct flusher_data *data = args;
+ unsigned long pfn = data->addr2;
+
+ __flush_dcache_page(pfn << PAGE_SHIFT);
+}
+
+/*
+ * This is called when a page-cache page is about to be mapped into a
+ * user process' address space. It offers an opportunity for a
+ * port to ensure d-cache/i-cache coherency if necessary.
+ *
+ * Not entirely sure why this is necessary on SH3 with 32K cache but
+ * without it we get occasional "Memory fault" when loading a program.
+ */
+static void sh7705_flush_icache_folio(void *arg)
+{
+ struct folio *folio = arg;
+ __flush_purge_region(folio_address(folio), folio_size(folio));
+}
+
+void __init sh7705_cache_init(void)
+{
+ local_flush_icache_range = sh7705_flush_icache_range;
+ local_flush_dcache_folio = sh7705_flush_dcache_folio;
+ local_flush_cache_all = sh7705_flush_cache_all;
+ local_flush_cache_mm = sh7705_flush_cache_all;
+ local_flush_cache_dup_mm = sh7705_flush_cache_all;
+ local_flush_cache_range = sh7705_flush_cache_all;
+ local_flush_cache_page = sh7705_flush_cache_page;
+ local_flush_icache_folio = sh7705_flush_icache_folio;
+}
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
new file mode 100644
index 0000000000..24c58b7dc0
--- /dev/null
+++ b/arch/sh/mm/cache-shx3.c
@@ -0,0 +1,44 @@
+/*
+ * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cache.h>
+
+#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
+#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */
+
+void __init shx3_cache_init(void)
+{
+ unsigned int ccr;
+
+ ccr = __raw_readl(SH_CCR);
+
+ /*
+ * If we've got cache aliases, resolve them in hardware.
+ */
+ if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
+ ccr |= CCR_CACHE_SNM;
+
+ boot_cpu_data.icache.n_aliases = 0;
+ boot_cpu_data.dcache.n_aliases = 0;
+
+ pr_info("Enabling hardware synonym avoidance\n");
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * Broadcast I-cache block invalidations by default.
+ */
+ ccr |= CCR_CACHE_IBE;
+#endif
+
+ writel_uncached(ccr, SH_CCR);
+}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
new file mode 100644
index 0000000000..9bcaa5619e
--- /dev/null
+++ b/arch/sh/mm/cache.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/cache.c
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2002 - 2010 Paul Mundt
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void (*local_flush_cache_all)(void *args) = cache_noop;
+void (*local_flush_cache_mm)(void *args) = cache_noop;
+void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
+void (*local_flush_cache_page)(void *args) = cache_noop;
+void (*local_flush_cache_range)(void *args) = cache_noop;
+void (*local_flush_dcache_folio)(void *args) = cache_noop;
+void (*local_flush_icache_range)(void *args) = cache_noop;
+void (*local_flush_icache_folio)(void *args) = cache_noop;
+void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
+
+void (*__flush_wback_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_wback_region);
+void (*__flush_purge_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_purge_region);
+void (*__flush_invalidate_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_invalidate_region);
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
+static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
+ int wait)
+{
+ preempt_disable();
+
+ /* Needing IPI for cross-core flush is SHX3-specific. */
+#ifdef CONFIG_CPU_SHX3
+ /*
+ * It's possible that this gets called early on when IRQs are
+ * still disabled due to ioremapping by the boot CPU, so don't
+ * even attempt IPIs unless there are other CPUs online.
+ */
+ if (num_online_cpus() > 1)
+ smp_call_function(func, info, wait);
+#endif
+
+ func(info);
+
+ preempt_enable();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ struct folio *folio = page_folio(page);
+
+ if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
+ test_bit(PG_dcache_clean, &folio->flags)) {
+ void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(vto, src, len);
+ kunmap_coherent(vto);
+ } else {
+ memcpy(dst, src, len);
+ if (boot_cpu_data.dcache.n_aliases)
+ clear_bit(PG_dcache_clean, &folio->flags);
+ }
+
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ struct folio *folio = page_folio(page);
+
+ if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
+ test_bit(PG_dcache_clean, &folio->flags)) {
+ void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ memcpy(dst, vfrom, len);
+ kunmap_coherent(vfrom);
+ } else {
+ memcpy(dst, src, len);
+ if (boot_cpu_data.dcache.n_aliases)
+ clear_bit(PG_dcache_clean, &folio->flags);
+ }
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ struct folio *src = page_folio(from);
+ void *vfrom, *vto;
+
+ vto = kmap_atomic(to);
+
+ if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
+ test_bit(PG_dcache_clean, &src->flags)) {
+ vfrom = kmap_coherent(from, vaddr);
+ copy_page(vto, vfrom);
+ kunmap_coherent(vfrom);
+ } else {
+ vfrom = kmap_atomic(from);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom);
+ }
+
+ if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+ (vma->vm_flags & VM_EXEC))
+ __flush_purge_region(vto, PAGE_SIZE);
+
+ kunmap_atomic(vto);
+ /* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *kaddr = kmap_atomic(page);
+
+ clear_page(kaddr);
+
+ if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+ __flush_purge_region(kaddr, PAGE_SIZE);
+
+ kunmap_atomic(kaddr);
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ if (!boot_cpu_data.dcache.n_aliases)
+ return;
+
+ if (pfn_valid(pfn)) {
+ struct folio *folio = page_folio(pfn_to_page(pfn));
+ int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
+ if (dirty)
+ __flush_purge_region(folio_address(folio),
+ folio_size(folio));
+ }
+}
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+ struct folio *folio = page_folio(page);
+ unsigned long addr = (unsigned long) page_address(page);
+
+ if (pages_do_alias(addr, vmaddr)) {
+ if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
+ test_bit(PG_dcache_clean, &folio->flags)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ /* XXX.. For now kunmap_coherent() does a purge */
+ /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
+ kunmap_coherent(kaddr);
+ } else
+ __flush_purge_region(folio_address(folio),
+ folio_size(folio));
+ }
+}
+
+void flush_cache_all(void)
+{
+ cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
+}
+EXPORT_SYMBOL(flush_cache_all);
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+ if (boot_cpu_data.dcache.n_aliases == 0)
+ return;
+
+ cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+ if (boot_cpu_data.dcache.n_aliases == 0)
+ return;
+
+ cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+{
+ struct flusher_data data;
+
+ data.vma = vma;
+ data.addr1 = addr;
+ data.addr2 = pfn;
+
+ cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct flusher_data data;
+
+ data.vma = vma;
+ data.addr1 = start;
+ data.addr2 = end;
+
+ cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+}
+EXPORT_SYMBOL(flush_cache_range);
+
+void flush_dcache_folio(struct folio *folio)
+{
+ cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flusher_data data;
+
+ data.vma = NULL;
+ data.addr1 = start;
+ data.addr2 = end;
+
+ cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr)
+{
+ /* Nothing uses the VMA, so just pass the folio along */
+ cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
+}
+
+void flush_cache_sigtramp(unsigned long address)
+{
+ cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+}
+
+static void compute_alias(struct cache_info *c)
+{
+#ifdef CONFIG_MMU
+ c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+#else
+ c->alias_mask = 0;
+#endif
+ c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
+}
+
+static void __init emit_cache_params(void)
+{
+ printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ boot_cpu_data.icache.ways,
+ boot_cpu_data.icache.sets,
+ boot_cpu_data.icache.way_incr);
+ printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ boot_cpu_data.icache.entry_mask,
+ boot_cpu_data.icache.alias_mask,
+ boot_cpu_data.icache.n_aliases);
+ printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ boot_cpu_data.dcache.ways,
+ boot_cpu_data.dcache.sets,
+ boot_cpu_data.dcache.way_incr);
+ printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ boot_cpu_data.dcache.entry_mask,
+ boot_cpu_data.dcache.alias_mask,
+ boot_cpu_data.dcache.n_aliases);
+
+ /*
+ * Emit Secondary Cache parameters if the CPU has a probed L2.
+ */
+ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
+ printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ boot_cpu_data.scache.ways,
+ boot_cpu_data.scache.sets,
+ boot_cpu_data.scache.way_incr);
+ printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ boot_cpu_data.scache.entry_mask,
+ boot_cpu_data.scache.alias_mask,
+ boot_cpu_data.scache.n_aliases);
+ }
+}
+
+void __init cpu_cache_init(void)
+{
+ unsigned int cache_disabled = 0;
+
+#ifdef SH_CCR
+ cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
+#endif
+
+ compute_alias(&boot_cpu_data.icache);
+ compute_alias(&boot_cpu_data.dcache);
+ compute_alias(&boot_cpu_data.scache);
+
+ __flush_wback_region = noop__flush_region;
+ __flush_purge_region = noop__flush_region;
+ __flush_invalidate_region = noop__flush_region;
+
+ /*
+ * No flushing is necessary in the disabled cache case, so we can
+ * just keep the noop functions in local_flush_..() and __flush_..().
+ */
+ if (unlikely(cache_disabled))
+ goto skip;
+
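+ /*
+ * Hook up the family-specific cache handling. Each *_cache_init()
+ * is declared weak here and is only called for the matching CPU
+ * type/family.
+ */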
+ if (boot_cpu_data.type == CPU_J2) {
+ extern void __weak j2_cache_init(void);
+
+ j2_cache_init();
+ } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
+ extern void __weak sh2_cache_init(void);
+
+ sh2_cache_init();
+ }
+
+ if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
+ extern void __weak sh2a_cache_init(void);
+
+ sh2a_cache_init();
+ }
+
+ if (boot_cpu_data.family == CPU_FAMILY_SH3) {
+ extern void __weak sh3_cache_init(void);
+
+ sh3_cache_init();
+
+ if ((boot_cpu_data.type == CPU_SH7705) &&
+ (boot_cpu_data.dcache.sets == 512)) {
+ extern void __weak sh7705_cache_init(void);
+
+ sh7705_cache_init();
+ }
+ }
+
+ if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
+ (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
+ (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
+ extern void __weak sh4_cache_init(void);
+
+ sh4_cache_init();
+
+ if ((boot_cpu_data.type == CPU_SH7786) ||
+ (boot_cpu_data.type == CPU_SHX3)) {
+ extern void __weak shx3_cache_init(void);
+
+ shx3_cache_init();
+ }
+ }
+
+skip:
+ emit_cache_params();
+}
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
new file mode 100644
index 0000000000..0de206c1ac
--- /dev/null
+++ b/arch/sh/mm/consistent.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static int __init memchunk_setup(char *str)
+{
+ return 1; /* accept anything that begins with "memchunk." */
+}
+__setup("memchunk.", memchunk_setup);
+
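+/*
+ * Scan the saved command line for a "memchunk.<name>=<size>" argument
+ * and, if one matches, override the platform-provided chunk size.
+ */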
+static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
+{
+ char *p = boot_command_line;
+ int k = strlen(name);
+
+ while ((p = strstr(p, "memchunk."))) {
+ p += 9; /* strlen("memchunk.") */
+ if (!strncmp(name, p, k) && p[k] == '=') {
+ p += k + 1;
+ *sizep = memparse(p, NULL);
+ pr_info("%s: forcing memory chunk size to 0x%08lx\n",
+ name, *sizep);
+ break;
+ }
+ }
+}
+
+int __init platform_resource_setup_memory(struct platform_device *pdev,
+ char *name, unsigned long memsize)
+{
+ struct resource *r;
+ dma_addr_t dma_handle;
+ void *buf;
+
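+ /* Use the device's last resource slot, which must still be empty. */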
+ r = pdev->resource + pdev->num_resources - 1;
+ if (r->flags) {
+ pr_warn("%s: unable to find empty space for resource\n", name);
+ return -EINVAL;
+ }
+
+ memchunk_cmdline_override(name, &memsize);
+ if (!memsize)
+ return 0;
+
+ buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("%s: unable to allocate memory\n", name);
+ return -ENOMEM;
+ }
+
+ r->flags = IORESOURCE_MEM;
+ r->start = dma_handle;
+ r->end = r->start + memsize - 1;
+ r->name = name;
+ return 0;
+}
diff --git a/arch/sh/mm/extable_32.c b/arch/sh/mm/extable_32.c
new file mode 100644
index 0000000000..14312027bb
--- /dev/null
+++ b/arch/sh/mm/extable_32.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sh/mm/extable.c
+ * Taken from:
+ * linux/arch/i386/mm/extable.c
+ */
+
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+#include <asm/ptrace.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
new file mode 100644
index 0000000000..06e6b49529
--- /dev/null
+++ b/arch/sh/mm/fault.c
@@ -0,0 +1,491 @@
+/*
+ * Page fault handler for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 - 2012 Paul Mundt
+ *
+ * Based on linux/arch/i386/mm/fault.c:
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/io_trapped.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+static void
+force_sig_info_fault(int si_signo, int si_code, unsigned long address)
+{
+ force_sig_fault(si_signo, si_code, (void __user *)address);
+}
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
+ */
+static void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+
+ if (mm) {
+ pgd = mm->pgd;
+ } else {
+ pgd = get_TTB();
+
+ if (unlikely(!pgd))
+ pgd = swapper_pg_dir;
+ }
+
+ pr_alert("pgd = %p\n", pgd);
+ pgd += pgd_index(addr);
+ pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
+ (u64)pgd_val(*pgd));
+
+ do {
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ p4d = p4d_offset(pgd, addr);
+ if (PTRS_PER_P4D != 1)
+ pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
+ (u64)p4d_val(*p4d));
+
+ if (p4d_none(*p4d))
+ break;
+
+ if (p4d_bad(*p4d)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ pud = pud_offset(p4d, addr);
+ if (PTRS_PER_PUD != 1)
+ pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
+ (u64)pud_val(*pud));
+
+ if (pud_none(*pud))
+ break;
+
+ if (pud_bad(*pud)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (PTRS_PER_PMD != 1)
+ pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
+ (u64)pmd_val(*pmd));
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ /* We must not map this if we have highmem enabled */
+ if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+ break;
+
+ pte = pte_offset_kernel(pmd, addr);
+ pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
+ (u64)pte_val(*pte));
+ } while (0);
+
+ pr_cont("\n");
+}
+
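+/*
+ * Copy the kernel PMD entry covering @address from the init_mm
+ * reference page table into the faulting @pgd. Returns the kernel
+ * PMD on success, or NULL if the fault cannot be resolved here.
+ */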
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+{
+ unsigned index = pgd_index(address);
+ pgd_t *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd += index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ return NULL;
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ return NULL;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud_k))
+ return NULL;
+
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ return NULL;
+
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
+ else {
+ /*
+ * The page tables are fully synchronised, so there must
+ * be another reason for the fault. Return NULL here to
+ * signal that we have not taken care of the fault.
+ */
+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ return NULL;
+ }
+
+ return pmd_k;
+}
+
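+/*
+ * Upper bound for kernel-space faults handled by vmalloc_fault(): with
+ * store queues enabled this extends up to P3_ADDR_MAX, otherwise it
+ * stops at the end of the vmalloc area.
+ */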
+#ifdef CONFIG_SH_STORE_QUEUES
+#define __FAULT_ADDR_LIMIT P3_ADDR_MAX
+#else
+#define __FAULT_ADDR_LIMIT VMALLOC_END
+#endif
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ pgd_t *pgd_k;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc/module/P3 area: */
+ if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_k = get_TTB();
+ pmd_k = vmalloc_sync_one(pgd_k, address);
+ if (!pmd_k)
+ return -1;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+
+static void
+show_fault_oops(struct pt_regs *regs, unsigned long address)
+{
+ if (!oops_may_print())
+ return;
+
+ pr_alert("BUG: unable to handle kernel %s at %08lx\n",
+ address < PAGE_SIZE ? "NULL pointer dereference"
+ : "paging request",
+ address);
+ pr_alert("PC:");
+ printk_address(regs->pc, 1);
+
+ show_pte(NULL, address);
+}
+
+static noinline void
+no_context(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ if (handle_trapped_io(regs, address))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ show_fault_oops(regs, address);
+
+ die("Oops", regs, error_code);
+}
+
+static void
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, int si_code)
+{
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ /*
+ * It's possible to have interrupts off here:
+ */
+ local_irq_enable();
+
+ force_sig_info_fault(SIGSEGV, si_code, address);
+
+ return;
+ }
+
+ no_context(regs, error_code, address);
+}
+
+static noinline void
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
+}
+
+static void
+__bad_area(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, int si_code)
+{
+ struct mm_struct *mm = current->mm;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+ mmap_read_unlock(mm);
+
+ __bad_area_nosemaphore(regs, error_code, address, si_code);
+}
+
+static noinline void
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+ __bad_area(regs, error_code, address, SEGV_MAPERR);
+}
+
+static noinline void
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ __bad_area(regs, error_code, address, SEGV_ACCERR);
+}
+
+static void
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+
+ mmap_read_unlock(mm);
+
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs))
+ no_context(regs, error_code, address);
+
+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
+}
+
+static noinline int
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, vm_fault_t fault)
+{
+ /*
+ * Pagefault was interrupted by SIGKILL. We have no reason to
+ * continue pagefault.
+ */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, error_code, address);
+ return 1;
+ }
+
+ /* Release mmap_lock first if necessary */
+ if (!(fault & VM_FAULT_RETRY))
+ mmap_read_unlock(current->mm);
+
+ if (!(fault & VM_FAULT_ERROR))
+ return 0;
+
+ if (fault & VM_FAULT_OOM) {
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+ no_context(regs, error_code, address);
+ return 1;
+ }
+
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we got
+ * oom-killed):
+ */
+ pagefault_out_of_memory();
+ } else {
+ if (fault & VM_FAULT_SIGBUS)
+ do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
+ else
+ BUG();
+ }
+
+ return 1;
+}
+
+static inline int access_error(int error_code, struct vm_area_struct *vma)
+{
+ if (error_code & FAULT_CODE_WRITE) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+ return 1;
+ return 0;
+ }
+
+ /* ITLB miss on NX page */
+ if (unlikely((error_code & FAULT_CODE_ITLB) &&
+ !(vma->vm_flags & VM_EXEC)))
+ return 1;
+
+ /* read, not present: */
+ if (unlikely(!vma_is_accessible(vma)))
+ return 1;
+
+ return 0;
+}
+
+static int fault_in_kernel_space(unsigned long address)
+{
+ return address >= TASK_SIZE;
+}
+
+/*
+ * This routine handles page faults. It determines the address and
+ * the problem, and then passes it off to the appropriate routine.
+ */
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code,
+ unsigned long address)
+{
+ unsigned long vec;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+
+ tsk = current;
+ mm = tsk->mm;
+ vec = lookup_exception_vector();
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely(fault_in_kernel_space(address))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ if (kprobe_page_fault(regs, vec))
+ return;
+
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+ if (unlikely(kprobe_page_fault(regs, vec)))
+ return;
+
+ /* Only enable interrupts if they were on before the fault */
+ if ((regs->sr & SR_IMASK) != SR_IMASK)
+ local_irq_enable();
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+ /*
+ * If we're in an interrupt, have no user context or are running
+ * with pagefaults disabled, then we must not take the fault:
+ */
+ if (unlikely(faulthandler_disabled() || !mm)) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (unlikely(!vma)) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+ if (unlikely(access_error(error_code, vma))) {
+ bad_area_access_error(regs, error_code, address);
+ return;
+ }
+
+ set_thread_fault_code(error_code);
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (error_code & FAULT_CODE_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
+ if (mm_fault_error(regs, error_code, address, fault))
+ return;
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+
+ mmap_read_unlock(mm);
+}
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
new file mode 100644
index 0000000000..8b8ef89721
--- /dev/null
+++ b/arch/sh/mm/flush-sh4.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+/*
+ * Write back the dirty D-caches, but do not invalidate them.
+ *
+ * START: Virtual Address (U0, P1, or P3)
+ * SIZE: Size of the region.
+ */
+static void sh4__flush_wback_region(void *start, int size)
+{
+ reg_size_t aligned_start, v, cnt, end;
+
+ aligned_start = register_align(start);
+ v = aligned_start & ~(L1_CACHE_BYTES-1);
+ end = (aligned_start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ cnt = (end - v) / L1_CACHE_BYTES;
+
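+ /* Write back eight cache lines per iteration to cut loop overhead. */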
+ while (cnt >= 8) {
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ cnt -= 8;
+ }
+
+ while (cnt) {
+ __ocbwb(v); v += L1_CACHE_BYTES;
+ cnt--;
+ }
+}
+
+/*
+ * Write back the dirty D-caches and invalidate them.
+ *
+ * START: Virtual Address (U0, P1, or P3)
+ * SIZE: Size of the region.
+ */
+static void sh4__flush_purge_region(void *start, int size)
+{
+ reg_size_t aligned_start, v, cnt, end;
+
+ aligned_start = register_align(start);
+ v = aligned_start & ~(L1_CACHE_BYTES-1);
+ end = (aligned_start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ cnt = (end - v) / L1_CACHE_BYTES;
+
+ while (cnt >= 8) {
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ __ocbp(v); v += L1_CACHE_BYTES;
+ cnt -= 8;
+ }
+ while (cnt) {
+ __ocbp(v); v += L1_CACHE_BYTES;
+ cnt--;
+ }
+}
+
+/*
+ * No write back please
+ */
+static void sh4__flush_invalidate_region(void *start, int size)
+{
+ reg_size_t aligned_start, v, cnt, end;
+
+ aligned_start = register_align(start);
+ v = aligned_start & ~(L1_CACHE_BYTES-1);
+ end = (aligned_start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ cnt = (end - v) / L1_CACHE_BYTES;
+
+ while (cnt >= 8) {
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ __ocbi(v); v += L1_CACHE_BYTES;
+ cnt -= 8;
+ }
+
+ while (cnt) {
+ __ocbi(v); v += L1_CACHE_BYTES;
+ cnt--;
+ }
+}
+
+void __init sh4__flush_region_init(void)
+{
+ __flush_wback_region = sh4__flush_wback_region;
+ __flush_invalidate_region = sh4__flush_invalidate_region;
+ __flush_purge_region = sh4__flush_purge_region;
+}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
new file mode 100644
index 0000000000..6cb0ad73db
--- /dev/null
+++ b/arch/sh/mm/hugetlbpage.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/sh/mm/hugetlbpage.c
+ *
+ * SuperH HugeTLB page support.
+ *
+ * Cloned from sparc64 by Paul Mundt.
+ *
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd) {
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (p4d) {
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_huge(mm, pmd, addr);
+ }
+ }
+ }
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd) {
+ p4d = p4d_offset(pgd, addr);
+ if (p4d) {
+ pud = pud_offset(p4d, addr);
+ if (pud) {
+ pmd = pmd_offset(pud, addr);
+ if (pmd)
+ pte = pte_offset_huge(pmd, addr);
+ }
+ }
+ }
+
+ return pte;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
new file mode 100644
index 0000000000..bf1b540553
--- /dev/null
+++ b/arch/sh/mm/init.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/sh/mm/init.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002 - 2011 Paul Mundt
+ *
+ * Based on linux/arch/i386/mm/init.c:
+ * Copyright (C) 1995 Linus Torvalds
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/proc_fs.h>
+#include <linux/pagemap.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/kexec.h>
+#include <asm/tlb.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <asm/pgalloc.h>
+#include <linux/sizes.h>
+#include "ioremap.h"
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+void __init generic_mem_init(void)
+{
+ memblock_add(__MEMORY_START, __MEMORY_SIZE);
+}
+
+void __init __weak plat_mem_setup(void)
+{
+ /* Nothing to see here, move along. */
+}
+
+#ifdef CONFIG_MMU
+static pte_t *__get_pte_phys(unsigned long addr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd)) {
+ pgd_ERROR(*pgd);
+ return NULL;
+ }
+
+ p4d = p4d_alloc(NULL, pgd, addr);
+ if (unlikely(!p4d)) {
+ p4d_ERROR(*p4d);
+ return NULL;
+ }
+
+ pud = pud_alloc(NULL, p4d, addr);
+ if (unlikely(!pud)) {
+ pud_ERROR(*pud);
+ return NULL;
+ }
+
+ pmd = pmd_alloc(NULL, pud, addr);
+ if (unlikely(!pmd)) {
+ pmd_ERROR(*pmd);
+ return NULL;
+ }
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+ pte_t *pte;
+
+ pte = __get_pte_phys(addr);
+ if (!pte_none(*pte)) {
+ pte_ERROR(*pte);
+ return;
+ }
+
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
+ local_flush_tlb_one(get_asid(), addr);
+
+ if (pgprot_val(prot) & _PAGE_WIRED)
+ tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+ pte_t *pte;
+
+ pte = __get_pte_phys(addr);
+
+ if (pgprot_val(prot) & _PAGE_WIRED)
+ tlb_unwire_entry();
+
+ set_pte(pte, pfn_pte(0, __pgprot(0)));
+ local_flush_tlb_one(get_asid(), addr);
+}
+
+void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ set_pte_phys(address, phys, prot);
+}
+
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ clear_pte_phys(address, prot);
+}
+
+static pmd_t * __init one_md_table_init(pud_t *pud)
+{
+ if (pud_none(*pud)) {
+ pmd_t *pmd;
+
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, pmd);
+ BUG_ON(pmd != pmd_offset(pud, 0));
+ }
+
+ return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *pte;
+
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+ unsigned long vaddr, pte_t *lastpte)
+{
+ return pte;
+}
+
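+/*
+ * Pre-allocate the pmd and pte tables covering [start, end) so that
+ * fixmap entries can later be installed without further allocation.
+ */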
+void __init page_table_range_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd += k;
+#endif
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+ pmd, vaddr, pte);
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+}
+#endif /* CONFIG_MMU */
+
+void __init allocate_pgdat(unsigned int nid)
+{
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NUMA
+ NODE_DATA(nid) = memblock_alloc_try_nid(
+ sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ if (!NODE_DATA(nid))
+ panic("Can't allocate pgdat for node %d\n", nid);
+#endif
+
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init do_init_bootmem(void)
+{
+ unsigned long start_pfn, end_pfn;
+ int i;
+
+ /* Add active regions with valid PFNs. */
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
+ __add_active_range(0, start_pfn, end_pfn);
+
+ /* All of system RAM sits in node 0 for the non-NUMA case */
+ allocate_pgdat(0);
+ node_set_online(0);
+
+ plat_mem_setup();
+
+ sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+ unsigned long start_pfn;
+ u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+ u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
+
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(_end));
+
+ /*
+ * Reserve the kernel text and the bootmem bitmap. We do this in
+ * two steps (the first step was init_bootmem()), because
+ * this catches the (definitely buggy) case of us accidentally
+ * initializing the bootmem allocator with an invalid RAM area.
+ */
+ memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
+
+ /*
+ * Handle additional early reservations
+ */
+ check_for_initrd();
+ reserve_crashkernel();
+}
+
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long vaddr, end;
+
+ sh_mv.mv_mem_init();
+
+ early_reserve_mem();
+
+ /*
+ * Once the early reservations are out of the way, give the
+ * platforms a chance to kick out some memory.
+ */
+ if (sh_mv.mv_mem_reserve)
+ sh_mv.mv_mem_reserve();
+
+ memblock_enforce_memory_limit(memory_limit);
+ memblock_allow_resize();
+
+ memblock_dump_all();
+
+ /*
+ * Determine low and high memory ranges:
+ */
+ max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+ min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+ set_max_mapnr(max_low_pfn - min_low_pfn);
+
+ nodes_clear(node_online_map);
+
+ memory_start = (unsigned long)__va(__MEMORY_START);
+ memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
+
+ uncached_init();
+ pmb_init();
+ do_init_bootmem();
+ ioremap_fixed_init();
+
+ /* We don't need to map the kernel through the TLB, as
+ * it is permanently mapped using P1. So clear the
+ * entire pgd. */
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+ /* Set an initial value for the MMU.TTB so we don't have to
+ * check for a null value. */
+ set_TTB(swapper_pg_dir);
+
+ /*
+ * Populate the relevant portions of swapper_pg_dir so that
+ * we can use the fixmap entries without calling kmalloc.
+ * pte's will be filled in by __set_fixmap().
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+ page_table_range_init(vaddr, end, swapper_pg_dir);
+
+ kmap_coherent_init();
+
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(max_zone_pfns);
+}
+
+unsigned int mem_init_done = 0;
+
+void __init mem_init(void)
+{
+ pg_data_t *pgdat;
+
+ high_memory = NULL;
+ for_each_online_pgdat(pgdat)
+ high_memory = max_t(void *, high_memory,
+ __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
+
+ memblock_free_all();
+
+ /* Set this up early, so we can take care of the zero page */
+ cpu_cache_init();
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+ __flush_wback_region(empty_zero_page, PAGE_SIZE);
+
+ vsyscall_init();
+
+ pr_info("virtual kernel memory layout:\n"
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+ " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n"
+#endif
+ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+
+ (unsigned long)VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+
+ (unsigned long)memory_start, (unsigned long)high_memory,
+ ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+ (unsigned long)&__init_begin, (unsigned long)&__init_end,
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+ (unsigned long)&_etext, (unsigned long)&_edata,
+ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+ (unsigned long)&_text, (unsigned long)&_etext,
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ mem_init_done = 1;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params)
+{
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+ return -EINVAL;
+
+ /* We only have ZONE_NORMAL, so this is easy.. */
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+ if (unlikely(ret))
+ printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
+
+ return ret;
+}
+
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ __remove_pages(start_pfn, nr_pages, altmap);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
new file mode 100644
index 0000000000..33d20f3456
--- /dev/null
+++ b/arch/sh/mm/ioremap.c
@@ -0,0 +1,149 @@
+/*
+ * arch/sh/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005 - 2010 Paul Mundt
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PCs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <asm/io_trapped.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t last_addr = offset + size - 1;
+
+ /*
+ * For P1 and P2 space this is trivial, as everything is already
+ * mapped. Uncached access for P1 addresses are done through P2.
+ * In the P3 case or for addresses outside of the 29-bit space,
+ * mapping must be done by the PMB or by using page tables.
+ */
+ if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+ u64 flags = pgprot_val(prot);
+
+ /*
+ * Anything using the legacy PTEA space attributes needs
+ * to be kicked down to page table mappings.
+ */
+ if (unlikely(flags & _PAGE_PCC_MASK))
+ return NULL;
+ if (unlikely(flags & _PAGE_CACHABLE))
+ return (void __iomem *)P1SEGADDR(offset);
+
+ return (void __iomem *)P2SEGADDR(offset);
+ }
+
+ /* P4 above the store queues is always mapped. */
+ if (unlikely(offset >= P3_ADDR_MAX))
+ return (void __iomem *)P4SEGADDR(offset);
+
+ return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot) NULL
+#endif /* CONFIG_29BIT */
+
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+ void __iomem *mapped;
+ pgprot_t pgprot = __pgprot(prot);
+
+ mapped = __ioremap_trapped(phys_addr, size);
+ if (mapped)
+ return mapped;
+
+ mapped = __ioremap_29bit(phys_addr, size, pgprot);
+ if (mapped)
+ return mapped;
+
+ /*
+ * If we can't yet use the regular approach, go the fixmap route.
+ */
+ if (!mem_init_done)
+ return ioremap_fixed(phys_addr, size, pgprot);
+
+ /*
+ * First try to remap through the PMB.
+ * PMB entries are all pre-faulted.
+ */
+ mapped = pmb_remap_caller(phys_addr, size, pgprot,
+ __builtin_return_address(0));
+ if (mapped && !IS_ERR(mapped))
+ return mapped;
+
+ return generic_ioremap_prot(phys_addr, size, pgprot);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+ /*
+ * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+ * parts of P3.
+ */
+ if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+ return 1;
+#endif
+
+ return 0;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ unsigned long vaddr = (unsigned long __force)addr;
+
+ /*
+ * Nothing to do if there is no translatable mapping.
+ */
+ if (iomapping_nontranslatable(vaddr))
+ return;
+
+ /*
+ * There's no VMA if it's from an early fixed mapping.
+ */
+ if (iounmap_fixed((void __iomem *)addr) == 0)
+ return;
+
+ /*
+ * If the PMB handled it, there's nothing else to do.
+ */
+ if (pmb_unmap((void __iomem *)addr) == 0)
+ return;
+
+ generic_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/sh/mm/ioremap.h b/arch/sh/mm/ioremap.h
new file mode 100644
index 0000000000..f2544e721a
--- /dev/null
+++ b/arch/sh/mm/ioremap.h
@@ -0,0 +1,23 @@
+#ifndef _SH_MM_IORMEMAP_H
+#define _SH_MM_IORMEMAP_H 1
+
+#ifdef CONFIG_IOREMAP_FIXED
+void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
+int iounmap_fixed(void __iomem *);
+void ioremap_fixed_init(void);
+#else
+static inline void __iomem *
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+ BUG();
+ return NULL;
+}
+static inline void ioremap_fixed_init(void)
+{
+}
+static inline int iounmap_fixed(void __iomem *addr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_IOREMAP_FIXED */
+#endif /* _SH_MM_IORMEMAP_H */
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
new file mode 100644
index 0000000000..136113bcac
--- /dev/null
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * These functions should only be used when it is necessary to map a
+ * physical address space into the kernel address space before ioremap()
+ * can be used, e.g. early in boot before paging_init().
+ *
+ * Copyright (C) 2009 Matt Fleming
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/proc_fs.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include "ioremap.h"
+
+struct ioremap_map {
+ void __iomem *addr;
+ unsigned long size;
+ unsigned long fixmap_addr;
+};
+
+static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];
+
+void __init ioremap_fixed_init(void)
+{
+ struct ioremap_map *map;
+ int i;
+
+ for (i = 0; i < FIX_N_IOREMAPS; i++) {
+ map = &ioremap_maps[i];
+ map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
+ }
+}
+
+void __init __iomem *
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+ enum fixed_addresses idx0, idx;
+ struct ioremap_map *map;
+ unsigned int nrpages;
+ unsigned long offset;
+ int i, slot;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(phys_addr + size) - phys_addr;
+
+ slot = -1;
+ for (i = 0; i < FIX_N_IOREMAPS; i++) {
+ map = &ioremap_maps[i];
+ if (!map->addr) {
+ map->size = size;
+ slot = i;
+ break;
+ }
+ }
+
+ if (slot < 0)
+ return NULL;
+
+ /*
+ * Mappings have to fit in the FIX_IOREMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > FIX_N_IOREMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx0 = FIX_IOREMAP_BEGIN + slot;
+ idx = idx0;
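+ /* Wire each PTE so the early mapping stays resident in the TLB. */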
+ while (nrpages > 0) {
+ pgprot_val(prot) |= _PAGE_WIRED;
+ __set_fixmap(idx, phys_addr, prot);
+ phys_addr += PAGE_SIZE;
+ idx++;
+ --nrpages;
+ }
+
+ map->addr = (void __iomem *)(offset + map->fixmap_addr);
+ return map->addr;
+}
+
+int iounmap_fixed(void __iomem *addr)
+{
+ enum fixed_addresses idx;
+ struct ioremap_map *map;
+ unsigned int nrpages;
+ int i, slot;
+
+ slot = -1;
+ for (i = 0; i < FIX_N_IOREMAPS; i++) {
+ map = &ioremap_maps[i];
+ if (map->addr == addr) {
+ slot = i;
+ break;
+ }
+ }
+
+ /*
+ * If we don't match, it's not for us.
+ */
+ if (slot < 0)
+ return -EINVAL;
+
+ nrpages = map->size >> PAGE_SHIFT;
+
+ idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
+ while (nrpages > 0) {
+ __clear_fixmap(idx, __pgprot(_PAGE_WIRED));
+ --idx;
+ --nrpages;
+ }
+
+ map->size = 0;
+ map->addr = NULL;
+
+ return 0;
+}
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
new file mode 100644
index 0000000000..fa50e8f6e7
--- /dev/null
+++ b/arch/sh/mm/kmap.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/kmap.c
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+static pte_t *kmap_coherent_pte;
+
+void __init kmap_coherent_init(void)
+{
+ unsigned long vaddr;
+
+ /* cache the first coherent kmap pte */
+ vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+ kmap_coherent_pte = virt_to_kpte(vaddr);
+}
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ struct folio *folio = page_folio(page);
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));
+
+ preempt_disable();
+ pagefault_disable();
+
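+ /*
+ * Pick the per-CPU fixmap slot whose cache colour matches the
+ * user address so the temporary mapping does not alias with it.
+ */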
+ idx = FIX_CMAP_END -
+ (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
+ (FIX_N_COLOURS * smp_processor_id()));
+
+ vaddr = __fix_to_virt(idx);
+
+ BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
+ set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
+
+ return (void *)vaddr;
+}
+
+void kunmap_coherent(void *kvaddr)
+{
+ if (kvaddr >= (void *)FIXADDR_START) {
+ unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = __virt_to_fix(vaddr);
+
+ /* XXX.. Kill this later, here for sanity at the moment.. */
+ __flush_purge_region((void *)vaddr, PAGE_SIZE);
+
+ pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
+ local_flush_tlb_one(get_asid(), vaddr);
+ }
+
+ pagefault_enable();
+ preempt_enable();
+}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
new file mode 100644
index 0000000000..b82199878b
--- /dev/null
+++ b/arch/sh/mm/mmap.c
@@ -0,0 +1,184 @@
+/*
+ * arch/sh/mm/mmap.c
+ *
+ * Copyright (C) 2008 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
+#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
+/*
+ * To avoid cache aliases, we map the shared page with the same colour.
+ */
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+ unsigned long pgoff)
+{
+ unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+ unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
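+ /* Round up to a colour boundary, then add the offset's colour. */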
+ return base + off;
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_colour_align;
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ do_colour_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ do_colour_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (addr & ~PAGE_MASK) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+#endif /* CONFIG_MMU */
+
+/*
+ * You really shouldn't be using read() or write() on /dev/mem. This
+ * might go away in the future.
+ */
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+ if (addr < __MEMORY_START)
+ return 0;
+ if (addr + count > __pa(high_memory))
+ return 0;
+
+ return 1;
+}
+
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return 1;
+}
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
new file mode 100644
index 0000000000..78c4b6e6d3
--- /dev/null
+++ b/arch/sh/mm/nommu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/nommu.c
+ *
+ * Various helper routines and stubs for MMUless SH.
+ *
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <linux/uaccess.h>
+
+/*
+ * Nothing too terribly exciting here ..
+ */
+void copy_page(void *to, void *from)
+{
+ memcpy(to, from, PAGE_SIZE);
+}
+
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
+{
+ memcpy(to, from, n);
+ return 0;
+}
+
+__kernel_size_t __clear_user(void __user *to, __kernel_size_t n)
+{
+ memset((__force void *)to, 0, n);
+ return 0;
+}
+
+void local_flush_tlb_all(void)
+{
+ BUG();
+}
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ BUG();
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ BUG();
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ BUG();
+}
+
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
+{
+ BUG();
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ BUG();
+}
+
+void __flush_tlb_global(void)
+{
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __init kmap_coherent_init(void)
+{
+}
+
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ BUG();
+ return NULL;
+}
+
+void kunmap_coherent(void *kvaddr)
+{
+ BUG();
+}
+
+void __init page_table_range_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+}
+
+void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+{
+}
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
new file mode 100644
index 0000000000..50f0dc1744
--- /dev/null
+++ b/arch/sh/mm/numa.c
@@ -0,0 +1,56 @@
+/*
+ * arch/sh/mm/numa.c - Multiple node support for SH machines
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL_GPL(node_data);
+
+/*
+ * On SH machines the conventional approach is to stash system RAM
+ * in node 0, and other memory blocks into node 1 and up, ordered by
+ * latency. Each node's pgdat is node-local at the beginning of the node,
+ * immediately followed by the node mem map.
+ */
+void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+{
+ unsigned long start_pfn, end_pfn;
+
+ /* Don't allow bogus node assignment */
+ BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
+
+ start_pfn = PFN_DOWN(start);
+ end_pfn = PFN_DOWN(end);
+
+ pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ PAGE_KERNEL);
+
+ memblock_add(start, end - start);
+
+ __add_active_range(nid, start_pfn, end_pfn);
+
+ /* Node-local pgdat */
+ NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, nid);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES,
+ nid);
+
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+
+ /* It's up */
+ node_set_online(nid);
+}
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
new file mode 100644
index 0000000000..cf7ce4b573
--- /dev/null
+++ b/arch/sh/mm/pgtable.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *pgd_cachep;
+#if PAGETABLE_LEVELS > 2
+static struct kmem_cache *pmd_cachep;
+#endif
+
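+/*
+ * PGD constructor: clear the user portion and copy the kernel mappings
+ * from swapper_pg_dir.
+ */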
+void pgd_ctor(void *x)
+{
+ pgd_t *pgd = x;
+
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+void pgtable_cache_init(void)
+{
+ pgd_cachep = kmem_cache_create("pgd_cache",
+ PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
+ PAGE_SIZE, SLAB_PANIC, pgd_ctor);
+#if PAGETABLE_LEVELS > 2
+ pmd_cachep = kmem_cache_create("pmd_cache",
+ PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
+ PAGE_SIZE, SLAB_PANIC, NULL);
+#endif
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(pgd_cachep, pgd);
+}
+
+#if PAGETABLE_LEVELS > 2
+void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud((unsigned long)pmd));
+}
+
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(pmd_cachep, pmd);
+}
+#endif /* PAGETABLE_LEVELS > 2 */
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
new file mode 100644
index 0000000000..68eb7cc6e5
--- /dev/null
+++ b/arch/sh/mm/pmb.c
@@ -0,0 +1,887 @@
+/*
+ * arch/sh/mm/pmb.c
+ *
+ * Privileged Space Mapping Buffer (PMB) Support.
+ *
+ * Copyright (C) 2005 - 2011 Paul Mundt
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/syscore_ops.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <asm/cacheflush.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+struct pmb_entry;
+
+struct pmb_entry {
+ unsigned long vpn;
+ unsigned long ppn;
+ unsigned long flags;
+ unsigned long size;
+
+ raw_spinlock_t lock;
+
+ /*
+ * 0 .. NR_PMB_ENTRIES for specific entry selection, or
+ * PMB_NO_ENTRY to search for a free one
+ */
+ int entry;
+
+ /* Adjacent entry link for contiguous multi-entry mappings */
+ struct pmb_entry *link;
+};
+
+static struct {
+ unsigned long size;
+ int flag;
+} pmb_sizes[] = {
+ { .size = SZ_512M, .flag = PMB_SZ_512M, },
+ { .size = SZ_128M, .flag = PMB_SZ_128M, },
+ { .size = SZ_64M, .flag = PMB_SZ_64M, },
+ { .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
+static void pmb_unmap_entry(struct pmb_entry *, int depth);
+
+static DEFINE_RWLOCK(pmb_rwlock);
+static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
+static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
+
+static unsigned int pmb_iomapping_enabled;
+
+static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
+{
+ return (entry & PMB_E_MASK) << PMB_E_SHIFT;
+}
+
+static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
+{
+ return mk_pmb_entry(entry) | PMB_ADDR;
+}
+
+static __always_inline unsigned long mk_pmb_data(unsigned int entry)
+{
+ return mk_pmb_entry(entry) | PMB_DATA;
+}
+
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+ return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+ unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+ flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+ flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ flags |= PMB_C;
+#endif
+
+ return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+ unsigned long pmb_flags = 0;
+ u64 flags = pgprot_val(prot);
+
+ if (flags & _PAGE_CACHABLE)
+ pmb_flags |= PMB_C;
+ if (flags & _PAGE_WT)
+ pmb_flags |= PMB_WT | PMB_UB;
+
+ return pmb_flags;
+}
+
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+ return (b->vpn == (a->vpn + a->size)) &&
+ (b->ppn == (a->ppn + a->size)) &&
+ (b->flags == a->flags);
+}
+
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size)
+{
+ int i;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe, *iter;
+ unsigned long span;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ /*
+ * See if VPN and PPN are bounded by an existing mapping.
+ */
+ if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+ continue;
+ if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+ continue;
+
+ /*
+ * Now see if we're in range of a simple mapping.
+ */
+ if (size <= pmbe->size) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+
+ span = pmbe->size;
+
+ /*
+ * Finally for sizes that involve compound mappings, walk
+ * the chain.
+ */
+ for (iter = pmbe->link; iter; iter = iter->link)
+ span += iter->size;
+
+ /*
+ * Nothing else to do if the range requirements are met.
+ */
+ if (size <= span) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+ }
+
+ read_unlock(&pmb_rwlock);
+ return false;
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return true;
+
+ return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+ return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+ return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return pmb_sizes[i].flag;
+
+ return 0;
+}
+
+static int pmb_alloc_entry(void)
+{
+ int pos;
+
+ pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
+ if (pos >= 0 && pos < NR_PMB_ENTRIES)
+ __set_bit(pos, pmb_map);
+ else
+ pos = -ENOSPC;
+
+ return pos;
+}
+
+static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
+ unsigned long flags, int entry)
+{
+ struct pmb_entry *pmbe;
+ unsigned long irqflags;
+ void *ret = NULL;
+ int pos;
+
+ write_lock_irqsave(&pmb_rwlock, irqflags);
+
+ if (entry == PMB_NO_ENTRY) {
+ pos = pmb_alloc_entry();
+ if (unlikely(pos < 0)) {
+ ret = ERR_PTR(pos);
+ goto out;
+ }
+ } else {
+ if (__test_and_set_bit(entry, pmb_map)) {
+ ret = ERR_PTR(-ENOSPC);
+ goto out;
+ }
+
+ pos = entry;
+ }
+
+ write_unlock_irqrestore(&pmb_rwlock, irqflags);
+
+ pmbe = &pmb_entry_list[pos];
+
+ memset(pmbe, 0, sizeof(struct pmb_entry));
+
+ raw_spin_lock_init(&pmbe->lock);
+
+ pmbe->vpn = vpn;
+ pmbe->ppn = ppn;
+ pmbe->flags = flags;
+ pmbe->entry = pos;
+
+ return pmbe;
+
+out:
+ write_unlock_irqrestore(&pmb_rwlock, irqflags);
+ return ret;
+}
+
+static void pmb_free(struct pmb_entry *pmbe)
+{
+ __clear_bit(pmbe->entry, pmb_map);
+
+ pmbe->entry = PMB_NO_ENTRY;
+ pmbe->link = NULL;
+}
+
+/*
+ * Must be run uncached.
+ */
+static void __set_pmb_entry(struct pmb_entry *pmbe)
+{
+ unsigned long addr, data;
+
+ addr = mk_pmb_addr(pmbe->entry);
+ data = mk_pmb_data(pmbe->entry);
+
+ jump_to_uncached();
+
+ /* Set V-bit */
+ __raw_writel(pmbe->vpn | PMB_V, addr);
+ __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
+
+ back_to_cached();
+}
+
+static void __clear_pmb_entry(struct pmb_entry *pmbe)
+{
+ unsigned long addr, data;
+ unsigned long addr_val, data_val;
+
+ addr = mk_pmb_addr(pmbe->entry);
+ data = mk_pmb_data(pmbe->entry);
+
+ addr_val = __raw_readl(addr);
+ data_val = __raw_readl(data);
+
+ /* Clear V-bit */
+ writel_uncached(addr_val & ~PMB_V, addr);
+ writel_uncached(data_val & ~PMB_V, data);
+}
+
+#ifdef CONFIG_PM
+static void set_pmb_entry(struct pmb_entry *pmbe)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
+ __set_pmb_entry(pmbe);
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
+}
+#endif /* CONFIG_PM */
+
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size, pgprot_t prot)
+{
+ struct pmb_entry *pmbp, *pmbe;
+ unsigned long orig_addr, orig_size;
+ unsigned long flags, pmb_flags;
+ int i, mapped;
+
+ if (size < SZ_16M)
+ return -EINVAL;
+ if (!pmb_addr_valid(vaddr, size))
+ return -EFAULT;
+ if (pmb_mapping_exists(vaddr, phys, size))
+ return 0;
+
+ orig_addr = vaddr;
+ orig_size = size;
+
+ flush_tlb_kernel_range(vaddr, vaddr + size);
+
+ pmb_flags = pgprot_to_pmb_flags(prot);
+ pmbp = NULL;
+
+ do {
+ for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+ if (size < pmb_sizes[i].size)
+ continue;
+
+ pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+ pmb_sizes[i].flag, PMB_NO_ENTRY);
+ if (IS_ERR(pmbe)) {
+ pmb_unmap_entry(pmbp, mapped);
+ return PTR_ERR(pmbe);
+ }
+
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
+
+ pmbe->size = pmb_sizes[i].size;
+
+ __set_pmb_entry(pmbe);
+
+ phys += pmbe->size;
+ vaddr += pmbe->size;
+ size -= pmbe->size;
+
+ /*
+ * Link adjacent entries that span multiple PMB
+ * entries for easier tear-down.
+ */
+ if (likely(pmbp)) {
+ raw_spin_lock_nested(&pmbp->lock,
+ SINGLE_DEPTH_NESTING);
+ pmbp->link = pmbe;
+ raw_spin_unlock(&pmbp->lock);
+ }
+
+ pmbp = pmbe;
+
+ /*
+ * Instead of trying smaller sizes on every
+ * iteration (even if we succeed in allocating
+ * space), try using pmb_sizes[i].size again.
+ */
+ i--;
+ mapped++;
+
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
+ }
+ } while (size >= SZ_16M);
+
+ flush_cache_vmap(orig_addr, orig_addr + orig_size);
+
+ return 0;
+}
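The size-selection loop above walks the PMB page sizes (512MB, 128MB, 64MB and 16MB) from largest to smallest and, after each successful allocation, retries the same size before falling through to a smaller one. A stand-alone sketch of that greedy walk for a hypothetical 96MB bolt, which comes out as one 64MB entry followed by two 16MB entries:

#include <stdio.h>

int main(void)
{
	static const unsigned long sizes[] = {
		512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
	};
	unsigned long size = 96UL << 20;	/* hypothetical request */
	int i;

	do {
		for (i = 0; i < 4; i++) {
			if (size < sizes[i])
				continue;

			printf("map %3luMB chunk\n", sizes[i] >> 20);
			size -= sizes[i];
			i--;	/* retry the same size on the next pass */
		}
	} while (size >= (16UL << 20));

	return 0;	/* prints 64MB, 16MB, 16MB */
}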
+
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
+{
+ unsigned long vaddr;
+ phys_addr_t offset, last_addr;
+ phys_addr_t align_mask;
+ unsigned long aligned;
+ struct vm_struct *area;
+ int i, ret;
+
+ if (!pmb_iomapping_enabled)
+ return NULL;
+
+ /*
+ * Small mappings need to go through the TLB.
+ */
+ if (size < SZ_16M)
+ return ERR_PTR(-EINVAL);
+ if (!pmb_prot_valid(prot))
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (size >= pmb_sizes[i].size)
+ break;
+
+ last_addr = phys + size;
+ align_mask = ~(pmb_sizes[i].size - 1);
+ offset = phys & ~align_mask;
+ phys &= align_mask;
+ aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
+
+ /*
+ * XXX: This should really start from uncached_end, but this
+ * causes the MMU to reset, so for now we restrict it to the
+ * 0xb000...0xc000 range.
+ */
+ area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
+ P3SEG, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys;
+ vaddr = (unsigned long)area->addr;
+
+ ret = pmb_bolt_mapping(vaddr, phys, size, prot);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return (void __iomem *)(offset + (char *)vaddr);
+}
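The arithmetic above rounds the request out to whole PMB pages and remembers where the caller's physical address sits inside the first page, so the returned cookie still points at the requested bytes. A stand-alone sketch with hypothetical inputs, a 20MB request at the unaligned address 0x08300000, which ends up backed by two 16MB PMB pages:

#include <stdio.h>

/* Round up to a power-of-two boundary, as the kernel's ALIGN() does. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	static const unsigned long sizes[] = {
		512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
	};
	unsigned long phys = 0x08300000UL;	/* hypothetical, unaligned */
	unsigned long size = 20UL << 20;
	unsigned long last_addr, align_mask, offset, aligned;
	int i;

	for (i = 0; i < 4; i++)		/* largest PMB page <= the request */
		if (size >= sizes[i])
			break;

	last_addr  = phys + size;
	align_mask = ~(sizes[i] - 1);
	offset     = phys & ~align_mask;
	phys      &= align_mask;
	aligned    = ALIGN_UP(last_addr, sizes[i]) - phys;

	printf("PMB page %luMB, phys 0x%08lx, offset 0x%lx, span %luMB\n",
	       sizes[i] >> 20, phys, offset, aligned >> 20);
	/* PMB page 16MB, phys 0x08000000, offset 0x300000, span 32MB */
	return 0;
}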
+
+int pmb_unmap(void __iomem *addr)
+{
+ struct pmb_entry *pmbe = NULL;
+ unsigned long vaddr = (unsigned long __force)addr;
+ int i, found = 0;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ if (test_bit(i, pmb_map)) {
+ pmbe = &pmb_entry_list[i];
+ if (pmbe->vpn == vaddr) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ read_unlock(&pmb_rwlock);
+
+ if (found) {
+ pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+ do {
+ struct pmb_entry *pmblink = pmbe;
+
+ /*
+ * We may be called before this pmb_entry has been
+ * entered into the PMB table via set_pmb_entry(), but
+ * that's OK because we've allocated a unique slot for
+ * this entry in pmb_alloc() (even if we haven't filled
+ * it yet).
+ *
+ * Therefore, calling __clear_pmb_entry() is safe as no
+ * other mapping can be using that slot.
+ */
+ __clear_pmb_entry(pmbe);
+
+ flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
+
+ pmbe = pmblink->link;
+
+ pmb_free(pmblink);
+ } while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+ unsigned long flags;
+
+ if (unlikely(!pmbe))
+ return;
+
+ write_lock_irqsave(&pmb_rwlock, flags);
+ __pmb_unmap_entry(pmbe, depth);
+ write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+static void __init pmb_notify(void)
+{
+ int i;
+
+ pr_info("PMB: boot mappings:\n");
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+ pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+ pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+ }
+
+ read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+ struct pmb_entry *pmbp = NULL;
+ int i, j;
+
+ /*
+ * Run through the initial boot mappings, log the established
+ * ones, and blow away anything that falls outside of the valid
+ * PPN range. Specifically, we only care about existing mappings
+ * that impact the cached/uncached sections.
+ *
+ * Note that touching these can be a bit of a minefield; the boot
+ * loader can establish multi-page mappings with the same caching
+ * attributes, so we need to ensure that we aren't modifying a
+ * mapping that we're presently executing from, or may execute
+ * from in the case of straddling page boundaries.
+ *
+ * In the future we will have to tidy up after the boot loader by
+ * jumping between the cached and uncached mappings and tearing
+ * down alternating mappings while executing from the other.
+ */
+ for (i = 0; i < NR_PMB_ENTRIES; i++) {
+ unsigned long addr, data;
+ unsigned long addr_val, data_val;
+ unsigned long ppn, vpn, flags;
+ unsigned long irqflags;
+ unsigned int size;
+ struct pmb_entry *pmbe;
+
+ addr = mk_pmb_addr(i);
+ data = mk_pmb_data(i);
+
+ addr_val = __raw_readl(addr);
+ data_val = __raw_readl(data);
+
+ /*
+ * Skip over any bogus entries
+ */
+ if (!(data_val & PMB_V) || !(addr_val & PMB_V))
+ continue;
+
+ ppn = data_val & PMB_PFN_MASK;
+ vpn = addr_val & PMB_PFN_MASK;
+
+ /*
+ * Only preserve in-range mappings.
+ */
+ if (!pmb_ppn_in_range(ppn)) {
+ /*
+ * Invalidate anything out of bounds.
+ */
+ writel_uncached(addr_val & ~PMB_V, addr);
+ writel_uncached(data_val & ~PMB_V, data);
+ continue;
+ }
+
+ /*
+ * Update the caching attributes if necessary
+ */
+ if (data_val & PMB_C) {
+ data_val &= ~PMB_CACHE_MASK;
+ data_val |= pmb_cache_flags();
+
+ writel_uncached(data_val, data);
+ }
+
+ size = data_val & PMB_SZ_MASK;
+ flags = size | (data_val & PMB_CACHE_MASK);
+
+ pmbe = pmb_alloc(vpn, ppn, flags, i);
+ if (IS_ERR(pmbe)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ raw_spin_lock_irqsave(&pmbe->lock, irqflags);
+
+ for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+ if (pmb_sizes[j].flag == size)
+ pmbe->size = pmb_sizes[j].size;
+
+ if (pmbp) {
+ raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
+ /*
+ * Compare the previous entry against the current one to
+ * see if the entries span a contiguous mapping. If so,
+			 * set up the entry links accordingly. Compound mappings
+ * are later coalesced.
+ */
+ if (pmb_can_merge(pmbp, pmbe))
+ pmbp->link = pmbe;
+ raw_spin_unlock(&pmbp->lock);
+ }
+
+ pmbp = pmbe;
+
+ raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
+ }
+}
+
+static void __init pmb_merge(struct pmb_entry *head)
+{
+ unsigned long span, newsize;
+ struct pmb_entry *tail;
+ int i = 1, depth = 0;
+
+ span = newsize = head->size;
+
+ tail = head->link;
+ while (tail) {
+ span += tail->size;
+
+ if (pmb_size_valid(span)) {
+ newsize = span;
+ depth = i;
+ }
+
+ /* This is the end of the line.. */
+ if (!tail->link)
+ break;
+
+ tail = tail->link;
+ i++;
+ }
+
+ /*
+ * The merged page size must be valid.
+ */
+ if (!depth || !pmb_size_valid(newsize))
+ return;
+
+ head->flags &= ~PMB_SZ_MASK;
+ head->flags |= pmb_size_to_flags(newsize);
+
+ head->size = newsize;
+
+ __pmb_unmap_entry(head->link, depth);
+ __set_pmb_entry(head);
+}
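pmb_merge() walks the link chain, accumulating the spanned size, and remembers the deepest point at which the running total is itself a valid PMB page size; only that prefix of the chain gets folded into the head entry. A stand-alone sketch of the bookkeeping for a hypothetical 64MB + 64MB + 16MB + 16MB compound mapping, where only the first two entries can be folded into a 128MB page:

#include <stdio.h>
#include <stdbool.h>

static bool size_valid(unsigned long mb)
{
	return mb == 16 || mb == 64 || mb == 128 || mb == 512;
}

int main(void)
{
	/* head entry plus three linked entries, sizes in MB */
	unsigned long chain[] = { 64, 64, 16, 16 };
	unsigned long span = chain[0], newsize = chain[0];
	int i, depth = 0;

	for (i = 1; i < 4; i++) {
		span += chain[i];
		if (size_valid(span)) {		/* deepest mergeable prefix */
			newsize = span;
			depth = i;
		}
	}

	printf("merge head to %luMB, tear down %d linked entries\n",
	       newsize, depth);
	/* merge head to 128MB, tear down 1 linked entries */
	return 0;
}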
+
+static void __init pmb_coalesce(void)
+{
+ unsigned long flags;
+ int i;
+
+ write_lock_irqsave(&pmb_rwlock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ /*
+ * We're only interested in compound mappings
+ */
+ if (!pmbe->link)
+ continue;
+
+ /*
+ * Nothing to do if it already uses the largest possible
+ * page size.
+ */
+ if (pmbe->size == SZ_512M)
+ continue;
+
+ pmb_merge(pmbe);
+ }
+
+ write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+ int i;
+
+ /*
+ * If the uncached mapping was constructed by the kernel, it will
+ * already be a reasonable size.
+ */
+ if (uncached_size == SZ_16M)
+ return;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe;
+ unsigned long flags;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ if (pmbe->vpn != uncached_start)
+ continue;
+
+ /*
+ * Found it, now resize it.
+ */
+ raw_spin_lock_irqsave(&pmbe->lock, flags);
+
+ pmbe->size = SZ_16M;
+ pmbe->flags &= ~PMB_SZ_MASK;
+ pmbe->flags |= pmb_size_to_flags(pmbe->size);
+
+ uncached_resize(pmbe->size);
+
+ __set_pmb_entry(pmbe);
+
+ raw_spin_unlock_irqrestore(&pmbe->lock, flags);
+ }
+
+ read_unlock(&pmb_rwlock);
+}
+#endif
+
+static int __init early_pmb(char *p)
+{
+ if (!p)
+ return 0;
+
+ if (strstr(p, "iomap"))
+ pmb_iomapping_enabled = 1;
+
+ return 0;
+}
+early_param("pmb", early_pmb);
+
+void __init pmb_init(void)
+{
+ /* Synchronize software state */
+ pmb_synchronize();
+
+ /* Attempt to combine compound mappings */
+ pmb_coalesce();
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /* Resize initial mappings, if necessary */
+ pmb_resize();
+#endif
+
+ /* Log them */
+ pmb_notify();
+
+ writel_uncached(0, PMB_IRMCR);
+
+ /* Flush out the TLB */
+ local_flush_tlb_all();
+ ctrl_barrier();
+}
+
+bool __in_29bit_mode(void)
+{
+ return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
+}
+
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
+{
+ int i;
+
+ seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
+ "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
+ seq_printf(file, "ety vpn ppn size flags\n");
+
+ for (i = 0; i < NR_PMB_ENTRIES; i++) {
+ unsigned long addr, data;
+ unsigned int size;
+ char *sz_str = NULL;
+
+ addr = __raw_readl(mk_pmb_addr(i));
+ data = __raw_readl(mk_pmb_data(i));
+
+ size = data & PMB_SZ_MASK;
+ sz_str = (size == PMB_SZ_16M) ? " 16MB":
+ (size == PMB_SZ_64M) ? " 64MB":
+ (size == PMB_SZ_128M) ? "128MB":
+ "512MB";
+
+ /* 02: V 0x88 0x08 128MB C CB B */
+ seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
+ i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
+ (addr >> 24) & 0xff, (data >> 24) & 0xff,
+ sz_str, (data & PMB_C) ? 'C' : ' ',
+ (data & PMB_WT) ? "WT" : "CB",
+ (data & PMB_UB) ? "UB" : " B");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
+
+static int __init pmb_debugfs_init(void)
+{
+ debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
+ &pmb_debugfs_fops);
+ return 0;
+}
+subsys_initcall(pmb_debugfs_init);
+
+#ifdef CONFIG_PM
+static void pmb_syscore_resume(void)
+{
+ struct pmb_entry *pmbe;
+ int i;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ if (test_bit(i, pmb_map)) {
+ pmbe = &pmb_entry_list[i];
+ set_pmb_entry(pmbe);
+ }
+ }
+
+ read_unlock(&pmb_rwlock);
+}
+
+static struct syscore_ops pmb_syscore_ops = {
+ .resume = pmb_syscore_resume,
+};
+
+static int __init pmb_sysdev_init(void)
+{
+ register_syscore_ops(&pmb_syscore_ops);
+ return 0;
+}
+subsys_initcall(pmb_sysdev_init);
+#endif
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
new file mode 100644
index 0000000000..2d8fa718d5
--- /dev/null
+++ b/arch/sh/mm/sram.c
@@ -0,0 +1,35 @@
+/*
+ * SRAM pool for tiny memories not otherwise managed.
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/sram.h>
+
+/*
+ * This provides a standard SRAM pool for tiny memories that can be
+ * added either by the CPU or the platform code. Typical SRAM sizes
+ * to be inserted into the pool will generally be less than the page
+ * size; anything larger is more sensibly handled as a NUMA memory
+ * node.
+ */
+struct gen_pool *sram_pool;
+
+static int __init sram_pool_init(void)
+{
+ /*
+ * This is a global pool, we don't care about node locality.
+ */
+ sram_pool = gen_pool_create(1, -1);
+ if (unlikely(!sram_pool))
+ return -ENOMEM;
+
+ return 0;
+}
+core_initcall(sram_pool_init);
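A rough sketch of how platform code might feed this pool and carve a buffer out of it with the stock genalloc API; the address, size and function name below are made up for illustration:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/sizes.h>
#include <asm/sram.h>

/* Hypothetical platform hook: donate 2KB of on-chip RAM at a made-up
 * address to the pool, then allocate and release a small buffer. */
static int demo_sram_setup(void)
{
	unsigned long buf;

	if (gen_pool_add(sram_pool, 0xe5200000UL, SZ_2K, -1))
		return -ENOMEM;

	buf = gen_pool_alloc(sram_pool, 256);
	if (!buf)
		return -ENOMEM;

	/* ... hand the buffer to a driver ... */

	gen_pool_free(sram_pool, buf, 256);
	return 0;
}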
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
new file mode 100644
index 0000000000..11c6148283
--- /dev/null
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -0,0 +1,160 @@
+/*
+ * arch/sh/mm/tlb-debugfs.c
+ *
+ * debugfs ops for SH-4 ITLB/UTLBs.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+enum tlb_type {
+ TLB_TYPE_ITLB,
+ TLB_TYPE_UTLB,
+};
+
+static struct {
+ int bits;
+ const char *size;
+} tlb_sizes[] = {
+ { 0x0, " 1KB" },
+ { 0x1, " 4KB" },
+ { 0x2, " 8KB" },
+ { 0x4, " 64KB" },
+ { 0x5, "256KB" },
+ { 0x7, " 1MB" },
+ { 0x8, " 4MB" },
+ { 0xc, " 64MB" },
+};
+
+static int tlb_seq_show(struct seq_file *file, void *iter)
+{
+ unsigned int tlb_type = (unsigned int)file->private;
+ unsigned long addr1, addr2, data1, data2;
+ unsigned long flags;
+ unsigned long mmucr;
+ unsigned int nentries, entry;
+ unsigned int urb;
+
+ mmucr = __raw_readl(MMUCR);
+ if ((mmucr & 0x1) == 0) {
+ seq_printf(file, "address translation disabled\n");
+ return 0;
+ }
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
+
+ /* Make the "entry >= urb" test fail. */
+ if (urb == 0)
+ urb = MMUCR_URB_NENTRIES + 1;
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ seq_printf(file, "entry: vpn ppn asid size valid wired\n");
+
+ for (entry = 0; entry < nentries; entry++) {
+ unsigned long vpn, ppn, asid, size;
+ unsigned long valid;
+ unsigned long val;
+ const char *sz = " ?";
+ int i;
+
+ val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ vpn = val & 0xfffffc00;
+ valid = val & 0x100;
+
+ val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ asid = val & MMU_CONTEXT_ASID_MASK;
+
+ val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ ppn = (val & 0x0ffffc00) << 4;
+
+ val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ size = (val & 0xf0) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
+ if (tlb_sizes[i].bits == size)
+ break;
+ }
+
+ if (i != ARRAY_SIZE(tlb_sizes))
+ sz = tlb_sizes[i].size;
+
+ seq_printf(file, "%2d: 0x%08lx 0x%08lx %5lu %s %s %s\n",
+ entry, vpn, ppn, asid,
+ sz, valid ? "V" : "-",
+ (urb <= entry) ? "W" : "-");
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int tlb_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tlb_seq_show, inode->i_private);
+}
+
+static const struct file_operations tlb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = tlb_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tlb_debugfs_init(void)
+{
+ debugfs_create_file("itlb", S_IRUSR, arch_debugfs_dir,
+ (void *)TLB_TYPE_ITLB, &tlb_debugfs_fops);
+ debugfs_create_file("utlb", S_IRUSR, arch_debugfs_dir,
+ (void *)TLB_TYPE_UTLB, &tlb_debugfs_fops);
+ return 0;
+}
+module_init(tlb_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
new file mode 100644
index 0000000000..4db21adfe5
--- /dev/null
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -0,0 +1,106 @@
+/*
+ * arch/sh/mm/tlb-pteaex.c
+ *
+ * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags, pteval, vpn;
+
+ /*
+	 * Handle the debugger faulting in on behalf of the debuggee.
+ */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = address & MMU_VPN_MASK;
+ __raw_writel(vpn, MMU_PTEH);
+
+ /* Set PTEAEX */
+ __raw_writel(get_asid(), MMU_PTEAEX);
+
+ pteval = pte.pte_low;
+
+ /* Set PTEA register */
+#ifdef CONFIG_X2TLB
+ /*
+ * For the extended mode TLB this is trivial, only the ESZ and
+ * EPR bits need to be written out to PTEA, with the remainder of
+ * the protection bits (with the exception of the compat-mode SZ
+ * and PR bits, which are cleared) being written out in PTEL.
+ */
+ __raw_writel(pte.pte_high, MMU_PTEA);
+#endif
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_CACHE_WRITETHROUGH
+ pteval |= _PAGE_WT;
+#endif
+ /* conveniently, we want all the software flags to be 0 anyway */
+ __raw_writel(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
+
+/*
+ * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
+ * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
+ * address arrays. In compat mode the second array is inaccessible, while
+ * in extended mode, the legacy 8-bit ASID field in address array 1 has
+ * undefined behaviour.
+ */
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
+{
+ jump_to_uncached();
+ __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
+ back_to_cached();
+}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
new file mode 100644
index 0000000000..fb400afc2a
--- /dev/null
+++ b/arch/sh/mm/tlb-sh3.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/tlb-sh3.c
+ *
+ * SH-3 specific TLB operations
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002 Paul Mundt
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags, pteval, vpn;
+
+ /*
+ * Handle debugger faulting in for debugee.
+	 * Handle the debugger faulting in on behalf of the debuggee.
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ __raw_writel(vpn, MMU_PTEH);
+
+ pteval = pte_val(pte);
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+ /* conveniently, we want all the software flags to be 0 anyway */
+ __raw_writel(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
+{
+ unsigned long addr, data;
+ int i, ways = MMU_NTLB_WAYS;
+
+ /*
+ * NOTE: PTEH.ASID should be set to this MM
+ * _AND_ we need to write ASID to the array.
+ *
+ * It would be simple if we didn't need to set PTEH.ASID...
+ */
+ addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
+ data = (page & 0xfffe0000) | asid; /* VALID bit is off */
+
+ if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
+ addr |= MMU_PAGE_ASSOC_BIT;
+ ways = 1; /* we already know the way .. */
+ }
+
+ for (i = 0; i < ways; i++)
+ __raw_writel(data, addr + (i << 8));
+}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+
+ /*
+ * Flush all the TLB.
+ *
+	 * Write the flush bit in the MMU control register:
+	 * the TF bit for SH-3, the TI bit for SH-4.
+	 * It's the same position on both, bit #2.
+ */
+ local_irq_save(flags);
+ status = __raw_readl(MMUCR);
+ status |= 0x04;
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
new file mode 100644
index 0000000000..aa0a9f4680
--- /dev/null
+++ b/arch/sh/mm/tlb-sh4.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch/sh/mm/tlb-sh4.c
+ *
+ * SH-4 specific TLB operations
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2002 - 2007 Paul Mundt
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags, pteval, vpn;
+
+ /*
+	 * Handle the debugger faulting in on behalf of the debuggee.
+ */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ __raw_writel(vpn, MMU_PTEH);
+
+ pteval = pte.pte_low;
+
+ /* Set PTEA register */
+#ifdef CONFIG_X2TLB
+ /*
+ * For the extended mode TLB this is trivial, only the ESZ and
+ * EPR bits need to be written out to PTEA, with the remainder of
+ * the protection bits (with the exception of the compat-mode SZ
+ * and PR bits, which are cleared) being written out in PTEL.
+ */
+ __raw_writel(pte.pte_high, MMU_PTEA);
+#else
+ if (cpu_data->flags & CPU_HAS_PTEA) {
+		/* The last 3 bits and the first one of pteval contain
+		 * the PTEA timing control and space attribute bits.
+		 */
+ __raw_writel(copy_ptea_attributes(pteval), MMU_PTEA);
+ }
+#endif
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_CACHE_WRITETHROUGH
+ pteval |= _PAGE_WT;
+#endif
+ /* conveniently, we want all the software flags to be 0 anyway */
+ __raw_writel(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
+{
+ unsigned long addr, data;
+
+ /*
+ * NOTE: PTEH.ASID should be set to this MM
+ * _AND_ we need to write ASID to the array.
+ *
+ * It would be simple if we didn't need to set PTEH.ASID...
+ */
+ addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
+ data = page | asid; /* VALID bit is off */
+ jump_to_uncached();
+ __raw_writel(data, addr);
+ back_to_cached();
+}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
new file mode 100644
index 0000000000..c92ce20db3
--- /dev/null
+++ b/arch/sh/mm/tlb-urb.c
@@ -0,0 +1,93 @@
+/*
+ * arch/sh/mm/tlb-urb.c
+ *
+ * TLB entry wiring helpers for URB-equipped parts.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long status, flags;
+ int urb;
+
+ local_irq_save(flags);
+
+ status = __raw_readl(MMUCR);
+ urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+ status &= ~MMUCR_URC;
+
+ /*
+ * Make sure we're not trying to wire the last TLB entry slot.
+ */
+ BUG_ON(!--urb);
+
+ urb = urb % MMUCR_URB_NENTRIES;
+
+ /*
+ * Insert this entry into the highest non-wired TLB slot (via
+ * the URC field).
+ */
+ status |= (urb << MMUCR_URC_SHIFT);
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ /* Load the entry into the TLB */
+ __update_tlb(vma, addr, pte);
+
+ /* ... and wire it up. */
+ status = __raw_readl(MMUCR);
+
+ status &= ~MMUCR_URB;
+ status |= (urb << MMUCR_URB_SHIFT);
+
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+ unsigned long status, flags;
+ int urb;
+
+ local_irq_save(flags);
+
+ status = __raw_readl(MMUCR);
+ urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+ status &= ~MMUCR_URB;
+
+ /*
+ * Make sure we're not trying to unwire a TLB entry when none
+ * have been wired.
+ */
+ BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+ urb = urb % MMUCR_URB_NENTRIES;
+
+ status |= (urb << MMUCR_URB_SHIFT);
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ local_irq_restore(flags);
+}
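Because URB is a single boundary, the wired slots form a stack that grows down from the top of the UTLB, which is why entries must be unwired in the reverse order of wiring. A toy stand-alone model of that discipline (it mirrors the boundary bookkeeping only, not the MMUCR field handling, and the initial boundary value is hypothetical):

#include <stdio.h>

#define NENTRIES 64

static unsigned int urb = NENTRIES;	/* nothing wired yet */

static unsigned int wire(void)
{
	return --urb;		/* new wired slot sits just below the boundary */
}

static void unwire(void)
{
	urb++;			/* only the most recently wired slot can go */
}

int main(void)
{
	unsigned int a = wire();	/* slot 63 */
	unsigned int b = wire();	/* slot 62 */

	printf("wired slots %u then %u\n", a, b);

	unwire();			/* releases slot 62 (b) first ... */
	unwire();			/* ... then slot 63 (a) */
	printf("boundary back to %u\n", urb);
	return 0;
}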
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
new file mode 100644
index 0000000000..1c53868632
--- /dev/null
+++ b/arch/sh/mm/tlbex_32.c
@@ -0,0 +1,82 @@
+/*
+ * TLB miss handler for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ pte_t entry;
+
+ /*
+	 * We don't take page faults for P1, P2, and parts of P4; these
+ * are always mapped, whether it be due to legacy behaviour in
+ * 29-bit mode, or due to PMB configuration in 32-bit mode.
+ */
+ if (address >= P3SEG && address < P3_ADDR_MAX) {
+ pgd = pgd_offset_k(address);
+ } else {
+ if (unlikely(address >= TASK_SIZE || !current->mm))
+ return 1;
+
+ pgd = pgd_offset(current->mm, address);
+ }
+
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none_or_clear_bad(p4d))
+ return 1;
+ pud = pud_offset(p4d, address);
+ if (pud_none_or_clear_bad(pud))
+ return 1;
+ pmd = pmd_offset(pud, address);
+ if (pmd_none_or_clear_bad(pmd))
+ return 1;
+ pte = pte_offset_kernel(pmd, address);
+ entry = *pte;
+ if (unlikely(pte_none(entry) || pte_not_present(entry)))
+ return 1;
+ if (unlikely(error_code && !pte_write(entry)))
+ return 1;
+
+ if (error_code)
+ entry = pte_mkdirty(entry);
+ entry = pte_mkyoung(entry);
+
+ set_pte(pte, entry);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
+ /*
+ * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+ * the case of an initial page write exception, so we need to
+ * flush it in order to avoid potential TLB entry duplication.
+ */
+ if (error_code == FAULT_CODE_INITIAL)
+ local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+ set_thread_fault_code(error_code);
+ update_mmu_cache(NULL, address, pte);
+
+ return 0;
+}
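The handler above is a plain software walk of the page tables (pgd/p4d/pud/pmd/pte, with most levels folded away on sh) followed by a TLB reload. A stand-alone toy of the same idea, using a made-up two-level layout with a 10/10/12 split of the virtual address:

#include <stdio.h>
#include <stdlib.h>

#define PGDIR_SHIFT	22
#define PTE_SHIFT	12
#define PTRS_PER_TABLE	1024

static unsigned long *pgd[PTRS_PER_TABLE];	/* level 1: directory */

static unsigned long *walk(unsigned long addr)
{
	unsigned long *pte_table = pgd[(addr >> PGDIR_SHIFT) & (PTRS_PER_TABLE - 1)];

	if (!pte_table)		/* the equivalent of a *_none() check: fault */
		return NULL;

	return &pte_table[(addr >> PTE_SHIFT) & (PTRS_PER_TABLE - 1)];
}

int main(void)
{
	unsigned long addr = 0x10403000UL;	/* hypothetical faulting address */
	unsigned long dir = (addr >> PGDIR_SHIFT) & (PTRS_PER_TABLE - 1);
	unsigned long *pte;

	pgd[dir] = calloc(PTRS_PER_TABLE, sizeof(unsigned long));
	if (!pgd[dir])
		return 1;
	pgd[dir][(addr >> PTE_SHIFT) & (PTRS_PER_TABLE - 1)] = 0x0c000163UL;	/* dummy PTE */

	pte = walk(addr);
	printf("pte for 0x%08lx: 0x%08lx\n", addr, pte ? *pte : 0UL);
	return 0;
}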
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
new file mode 100644
index 0000000000..a6a20d6de4
--- /dev/null
+++ b/arch/sh/mm/tlbflush_32.c
@@ -0,0 +1,137 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
+ unsigned long flags;
+ unsigned long asid;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ asid = cpu_asid(cpu, vma->vm_mm);
+ page &= PAGE_MASK;
+
+ local_irq_save(flags);
+ if (vma->vm_mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+ local_flush_tlb_one(asid, page);
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ local_irq_restore(flags);
+ }
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != NO_CONTEXT) {
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+ cpu_context(cpu, mm) = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm, cpu);
+ } else {
+ unsigned long asid;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ asid = cpu_asid(cpu, mm);
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ if (mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+ while (start < end) {
+ local_flush_tlb_one(asid, start);
+ start += PAGE_SIZE;
+ }
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+ }
+}
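The size check above is a simple cost cut-off: once the range covers more than a quarter of the TLB, it is cheaper to retire the whole context than to knock out entries one page at a time. A stand-alone sketch of the arithmetic with hypothetical numbers (4KB pages and a 64-entry TLB, so the crossover sits at 16 pages):

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MMU_NTLB_ENTRIES	64

int main(void)
{
	unsigned long start = 0x10000000UL;
	unsigned long end   = start + 20 * PAGE_SIZE;	/* 20-page range */
	int size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	if (size > (MMU_NTLB_ENTRIES / 4))
		printf("%d pages: cheaper to drop the context\n", size);
	else
		printf("%d pages: flush each page individually\n", size);
	return 0;
}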
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+ local_flush_tlb_all();
+ } else {
+ unsigned long asid;
+ unsigned long saved_asid = get_asid();
+
+ asid = cpu_asid(cpu, &init_mm);
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ set_asid(asid);
+ while (start < end) {
+ local_flush_tlb_one(asid, start);
+ start += PAGE_SIZE;
+ }
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+
+	/* Invalidate all TLB entries for this process. */
+	/* Instead of invalidating each entry, we get a new MMU context. */
+ if (cpu_context(cpu, mm) != NO_CONTEXT) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cpu_context(cpu, mm) = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm, cpu);
+ local_irq_restore(flags);
+ }
+}
+
+void __flush_tlb_global(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * This is the most destructive of the TLB flushing options,
+ * and will tear down all of the UTLB/ITLB mappings, including
+ * wired entries.
+ */
+ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
new file mode 100644
index 0000000000..bd1585e8ef
--- /dev/null
+++ b/arch/sh/mm/uncached.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <asm/page.h>
+#include <asm/addrspace.h>
+
+/*
+ * This is the offset of the uncached section from its cached alias.
+ *
+ * Legacy platforms handle trivial transitions between cached and
+ * uncached segments by making use of the 1:1 mapping relationship in
+ * 512MB lowmem; others go through a special uncached mapping.
+ *
+ * The default value is only valid in 29-bit mode; in 32-bit mode it
+ * will be updated by the early PMB initialization code.
+ */
+unsigned long cached_to_uncached = SZ_512M;
+unsigned long uncached_size = SZ_512M;
+unsigned long uncached_start, uncached_end;
+EXPORT_SYMBOL(uncached_start);
+EXPORT_SYMBOL(uncached_end);
+
+int virt_addr_uncached(unsigned long kaddr)
+{
+ return (kaddr >= uncached_start) && (kaddr < uncached_end);
+}
+EXPORT_SYMBOL(virt_addr_uncached);
+
+void __init uncached_init(void)
+{
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
+ uncached_start = P2SEG;
+#else
+ uncached_start = memory_end;
+#endif
+ uncached_end = uncached_start + uncached_size;
+}
+
+void __init uncached_resize(unsigned long size)
+{
+ uncached_size = size;
+ uncached_end = uncached_start + uncached_size;
+}
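In the legacy 29-bit layout the uncached alias of a cached P1 address is simply that address plus cached_to_uncached, which lands in the P2 segment selected by uncached_init(). A stand-alone sketch of the offset arithmetic and the virt_addr_uncached() range check, with the values hard-coded for the 29-bit case:

#include <stdio.h>

int main(void)
{
	unsigned long cached_to_uncached = 512UL << 20;	/* SZ_512M */
	unsigned long uncached_start = 0xa0000000UL;	/* P2SEG */
	unsigned long uncached_end = uncached_start + (512UL << 20);
	unsigned long kaddr = 0x8c001000UL;	/* some cached P1 address */
	unsigned long alias = kaddr + cached_to_uncached;

	printf("uncached alias of 0x%08lx is 0x%08lx (in range: %d)\n",
	       kaddr, alias,
	       alias >= uncached_start && alias < uncached_end);
	return 0;
}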