authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
commit2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree848558de17fb3008cdf4d861b01ac7781903ce39 /arch/openrisc/mm
parentInitial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/openrisc/mm')
-rw-r--r--  arch/openrisc/mm/Makefile    6
-rw-r--r--  arch/openrisc/mm/cache.c    57
-rw-r--r--  arch/openrisc/mm/fault.c   348
-rw-r--r--  arch/openrisc/mm/init.c    230
-rw-r--r--  arch/openrisc/mm/ioremap.c 130
-rw-r--r--  arch/openrisc/mm/tlb.c     193
6 files changed, 964 insertions, 0 deletions
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
new file mode 100644
index 000000000..8a0e580e2
--- /dev/null
+++ b/arch/openrisc/mm/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Linux OpenRISC-specific parts of the memory manager.
+#
+
+obj-y := fault.o cache.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
new file mode 100644
index 000000000..534a52ec5
--- /dev/null
+++ b/arch/openrisc/mm/cache.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC cache.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ */
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+
+ while (line < paddr + PAGE_SIZE) {
+ mtspr(reg, line);
+ line += L1_CACHE_BYTES;
+ }
+}
+
+void local_dcache_page_flush(struct page *page)
+{
+ cache_loop(page, SPR_DCBFR);
+}
+EXPORT_SYMBOL(local_dcache_page_flush);
+
+void local_icache_page_inv(struct page *page)
+{
+ cache_loop(page, SPR_ICBIR);
+}
+EXPORT_SYMBOL(local_icache_page_inv);
+
+void update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
+ int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+
+ /*
+ * Since icaches do not snoop for updated data on OpenRISC, we
+ * must write back and invalidate any dirty pages manually. We
+ * can skip data pages, since they will not end up in icaches.
+ */
+ if ((vma->vm_flags & VM_EXEC) && dirty)
+ sync_icache_dcache(page);
+}
+
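The cache_loop() helper above is the whole cache-maintenance story on this port: each write of a line address into SPR_DCBFR (flush) or SPR_ICBIR (invalidate) acts on one cache line. Below is a minimal userspace sketch of that line-stepping arithmetic, not kernel code; PAGE_SHIFT and L1_CACHE_BYTES are illustrative assumptions (8 KiB pages, 16-byte lines), and the mtspr write is replaced by a counter.

/* Userspace model of cache_loop(): walk one page of physical
 * addresses in cache-line-sized steps. Geometry is assumed, not
 * taken from the kernel configuration.
 */
#include <stdio.h>

#define PAGE_SHIFT     13
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define L1_CACHE_BYTES 16UL

static unsigned long cache_loop_model(unsigned long pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;
	/* paddr is already page aligned, so this rounding is a no-op
	 * kept for symmetry with the kernel code */
	unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
	unsigned long ops = 0;

	while (line < paddr + PAGE_SIZE) {
		ops++;			/* kernel: mtspr(reg, line) */
		line += L1_CACHE_BYTES;
	}
	return ops;
}

int main(void)
{
	/* one op per line: 8192 / 16 = 512 */
	printf("ops per page: %lu\n", cache_loop_model(42));
	return 0;
}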
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
new file mode 100644
index 000000000..e3ad46d02
--- /dev/null
+++ b/arch/openrisc/mm/fault.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC fault.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/extable.h>
+#include <linux/sched/signal.h>
+#include <linux/perf_event.h>
+
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define NUM_TLB_ENTRIES 64
+#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
+
+/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
+ * - also look into include/asm/mmu_context.h
+ */
+volatile pgd_t *current_pgd[NR_CPUS];
+
+extern void __noreturn die(char *, struct pt_regs *, long);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * On a bad access it delivers a signal to userspace or, for kernel
+ * faults, jumps to the exception fixup code (or dies).
+ */
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ int si_code;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+
+ tsk = current;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * NOTE2: This is done so that, when updating the vmalloc
+ * mappings we don't have to walk all processes' pgdirs and
+ * add the high mappings all at once. Instead we do it as they
+ * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+ * bit set so sometimes the TLB can use a lingering entry.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was not a protection error.
+ */
+
+ if (address >= VMALLOC_START &&
+ (vector != 0x300 && vector != 0x400) &&
+ !user_mode(regs))
+ goto vmalloc_fault;
+
+ /* If exceptions were enabled, we can reenable them here */
+ if (user_mode(regs)) {
+ /* Exception was in userspace: reenable interrupts */
+ local_irq_enable();
+ flags |= FAULT_FLAG_USER;
+ } else {
+ /* If the exception was in a syscall, IRQs may have
+ * been enabled or disabled. If they were enabled,
+ * reenable them.
+ */
+ if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
+ local_irq_enable();
+ }
+
+ mm = tsk->mm;
+ si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+ mmap_read_lock(mm);
+ vma = find_vma(mm, address);
+
+ if (!vma)
+ goto bad_area;
+
+ if (vma->vm_start <= address)
+ goto good_area;
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if (user_mode(regs)) {
+ /*
+ * accessing the stack below usp is always a bug.
+ * we get page-aligned addresses so we can only check
+ * if we're within a page from usp, but that might be
+ * enough to catch brutal errors at least.
+ */
+ if (address + PAGE_SIZE < regs->sp)
+ goto bad_area;
+ }
+ vma = expand_stack(mm, address);
+ if (!vma)
+ goto bad_area_nosemaphore;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ si_code = SEGV_ACCERR;
+
+ /* first do some preliminary protection checks */
+
+ if (write_acc) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ } else {
+ /* not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /* are we trying to execute from a non-executable area? */
+ if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
+ goto bad_area;
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs))
+ return;
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+
+ /* RGD: modeled on Cris */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+
+ mmap_read_unlock(mm);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+
+ /* User mode accesses just cause a SIGSEGV */
+
+ if (user_mode(regs)) {
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+ * when it accesses user memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+
+ {
+ const struct exception_table_entry *entry;
+
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ /* Adjust the instruction pointer in the stackframe */
+ regs->pc = entry->fixup;
+ return;
+ }
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ if ((unsigned long)(address) < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel access");
+ printk(" at virtual address 0x%08lx\n", address);
+
+ die("Oops", regs, write_acc);
+
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+
+out_of_memory:
+ mmap_read_unlock(mm);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ mmap_read_unlock(mm);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Use current_pgd instead of tsk->active_mm->pgd
+ * since the latter might be unavailable if this
+ * code is executed in an inopportunely timed irq
+ * (like inside schedule() between switch_mm and
+ * switch_to...).
+ */
+
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+/*
+ phx_warn("do_page_fault(): vmalloc_fault will not work, "
+ "since current_pgd assign a proper value somewhere\n"
+ "anyhow we don't need this at the moment\n");
+
+ phx_mmu("vmalloc_fault");
+*/
+ pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ /* Since we're two-level, we don't need to do both
+ * set_pgd and set_pmd (they do the same thing). If
+ * we go three-level at some point, do the right thing
+ * with pgd_present and set_pgd here.
+ *
+ * Also, since the vmalloc area is global, we don't
+ * need to copy individual PTE's, it is enough to
+ * copy the pgd pointer into the pte page of the
+ * root task. If that is there, we'll find our pte if
+ * it exists.
+ */
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+
+ if (!pmd_present(*pmd_k))
+ goto bad_area_nosemaphore;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
+ return;
+ }
+}
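The vmalloc_fault path above lazily copies one top-level entry from the master init_mm page table into the faulting CPU's current_pgd, instead of updating every process's pgd whenever vmalloc creates a mapping. A minimal userspace model of that idea follows; the table geometry (PTRS_PER_PGD, PGDIR_SHIFT) and the entry values are made-up illustrations, and set_pmd() is reduced to a plain array store.

/* Userspace model of the lazy vmalloc pgd sync. */
#include <stdio.h>

#define PTRS_PER_PGD 256
#define PGDIR_SHIFT  24	/* each top-level slot covers 16 MiB here */

static unsigned long master_pgd[PTRS_PER_PGD];	/* plays init_mm.pgd */
static unsigned long process_pgd[PTRS_PER_PGD];	/* plays current_pgd[cpu] */

static int vmalloc_fault_model(unsigned long address)
{
	unsigned int off = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

	if (!master_pgd[off])
		return -1;	/* kernel: goto no_context */

	/* kernel: set_pmd(pmd, *pmd_k), copying the master entry */
	process_pgd[off] = master_pgd[off];
	return 0;
}

int main(void)
{
	unsigned long vaddr = 0xc2345678UL;
	unsigned int off = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

	master_pgd[off] = 0xdeadb001UL;	/* vmalloc populated the master */

	if (vmalloc_fault_model(vaddr) == 0)
		printf("slot %u synced: %#lx\n", off, process_pgd[off]);
	return 0;
}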
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
new file mode 100644
index 000000000..d531ab82b
--- /dev/null
+++ b/arch/openrisc/mm/init.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC init.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+
+int mem_init_done;
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+
+ /*
+ * We use only ZONE_NORMAL
+ */
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfn);
+}
+
+extern const char _s_kernel_ro[], _e_kernel_ro[];
+
+/*
+ * Map all physical memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+ phys_addr_t start, end;
+ unsigned long v, p, e;
+ pgprot_t prot;
+ pgd_t *pge;
+ p4d_t *p4e;
+ pud_t *pue;
+ pmd_t *pme;
+ pte_t *pte;
+ u64 i;
+ /* These mark extents of read-only kernel pages...
+ * ...from vmlinux.lds.S
+ */
+
+ v = PAGE_OFFSET;
+
+ for_each_mem_range(i, &start, &end) {
+ p = (u32) start & PAGE_MASK;
+ e = (u32) end;
+
+ v = (u32) __va(p);
+ pge = pgd_offset_k(v);
+
+ while (p < e) {
+ int j;
+ p4e = p4d_offset(pge, v);
+ pue = pud_offset(p4e, v);
+ pme = pmd_offset(pue, v);
+
+ if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+ panic("%s: OR1K kernel hardcoded for "
+ "two-level page tables",
+ __func__);
+ }
+
+ /* Alloc one page for holding PTE's... */
+ pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate page for PTEs\n",
+ __func__);
+ set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
+
+ /* Fill the newly allocated page with PTE'S */
+ for (j = 0; p < e && j < PTRS_PER_PTE;
+ v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+ if (v >= (u32) _e_kernel_ro ||
+ v < (u32) _s_kernel_ro)
+ prot = PAGE_KERNEL;
+ else
+ prot = PAGE_KERNEL_RO;
+
+ set_pte(pte, mk_pte_phys(p, prot));
+ }
+
+ pge++;
+ }
+
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+ start, end);
+ }
+}
+
+void __init paging_init(void)
+{
+ extern void tlb_init(void);
+
+ int i;
+
+ printk(KERN_INFO "Setting up paging and PTEs.\n");
+
+ /* clear out the init_mm.pgd that will contain the kernel's mappings */
+
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i] = __pgd(0);
+
+ /* make sure the current pgd table points to something sane
+ * (even if it is most probably not used until the next
+ * switch_mm)
+ */
+ current_pgd[smp_processor_id()] = init_mm.pgd;
+
+ map_ram();
+
+ zone_sizes_init();
+
+ /* self modifying code ;) */
+ /* Since the old TLB miss handler has been running up until now,
+ * the kernel pages are still all RW, so we can still modify the
+ * text directly... after this change and a TLB flush, the kernel
+ * pages will become RO.
+ */
+ {
+ extern unsigned long dtlb_miss_handler;
+ extern unsigned long itlb_miss_handler;
+
+ unsigned long *dtlb_vector = __va(0x900);
+ unsigned long *itlb_vector = __va(0xa00);
+
+ printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+ *itlb_vector = ((unsigned long)&itlb_miss_handler -
+ (unsigned long)itlb_vector) >> 2;
+
+ /* Soft ordering constraint to ensure that dtlb_vector is
+ * the last thing updated
+ */
+ barrier();
+
+ printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
+ *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
+ (unsigned long)dtlb_vector) >> 2;
+
+ }
+
+ /* Soft ordering constraint to ensure that cache invalidation and
+ * TLB flush really happen _after_ code has been modified.
+ */
+ barrier();
+
+ /* Invalidate instruction caches after code modification */
+ mtspr(SPR_ICBIR, 0x900);
+ mtspr(SPR_ICBIR, 0xa00);
+
+ /* New TLB miss handlers and kernel page tables are now in place.
+ * Make sure that page flags get updated for all pages in TLB by
+ * flushing the TLB and forcing all TLB entries to be recreated
+ * from their page table flags.
+ */
+ flush_tlb_all();
+}
+
+/* References to section boundaries */
+
+void __init mem_init(void)
+{
+ BUG_ON(!mem_map);
+
+ max_mapnr = max_low_pfn;
+ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ /* this will put all low memory onto the freelists */
+ memblock_free_all();
+
+ printk("mem_init_done ...........................................\n");
+ mem_init_done = 1;
+ return;
+}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
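The protection_map table above is indexed by the low four vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED); DECLARE_VM_GET_PAGE_PROT generates the vm_get_page_prot() helper that performs the lookup. A userspace sketch of that lookup follows, with strings standing in for the arch's pgprot_t values; the VM_* bit values match the generic kernel definitions. The design choice visible in the table: private writable mappings get the copy-on-write PAGE_COPY* protections, while only shared writable mappings get PAGE_SHARED*.

/* Userspace sketch of the protection_map lookup. */
#include <stdio.h>

#define VM_NONE   0x0UL
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static const char *protection_map[16] = {
	[VM_NONE]                                  = "PAGE_NONE",
	[VM_READ]                                  = "PAGE_READONLY_X",
	[VM_WRITE]                                 = "PAGE_COPY",
	[VM_WRITE | VM_READ]                       = "PAGE_COPY_X",
	[VM_EXEC]                                  = "PAGE_READONLY",
	[VM_EXEC | VM_READ]                        = "PAGE_READONLY_X",
	[VM_EXEC | VM_WRITE]                       = "PAGE_COPY",
	[VM_EXEC | VM_WRITE | VM_READ]             = "PAGE_COPY_X",
	[VM_SHARED]                                = "PAGE_NONE",
	[VM_SHARED | VM_READ]                      = "PAGE_READONLY_X",
	[VM_SHARED | VM_WRITE]                     = "PAGE_SHARED",
	[VM_SHARED | VM_WRITE | VM_READ]           = "PAGE_SHARED_X",
	[VM_SHARED | VM_EXEC]                      = "PAGE_READONLY",
	[VM_SHARED | VM_EXEC | VM_READ]            = "PAGE_READONLY_X",
	[VM_SHARED | VM_EXEC | VM_WRITE]           = "PAGE_SHARED",
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "PAGE_SHARED_X",
};

/* models vm_get_page_prot(): mask vm_flags down to the four bits */
static const char *vm_get_page_prot_model(unsigned long vm_flags)
{
	return protection_map[vm_flags & 0xf];
}

int main(void)
{
	printf("private rw: %s\n",
	       vm_get_page_prot_model(VM_READ | VM_WRITE));	/* COW */
	printf("shared  rw: %s\n",
	       vm_get_page_prot_model(VM_SHARED | VM_READ | VM_WRITE));
	return 0;
}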
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
new file mode 100644
index 000000000..8ec0dafec
--- /dev/null
+++ b/arch/openrisc/mm/ioremap.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC ioremap.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/fixmap.h>
+#include <asm/bug.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+extern int mem_init_done;
+
+static unsigned int fixmaps_used __initdata;
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
+{
+ phys_addr_t p;
+ unsigned long v;
+ unsigned long offset, last_addr;
+ struct vm_struct *area = NULL;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = addr & ~PAGE_MASK;
+ p = addr & PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - p;
+
+ if (likely(mem_init_done)) {
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ v = (unsigned long)area->addr;
+ } else {
+ if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
+ return NULL;
+ v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
+ fixmaps_used += (size >> PAGE_SHIFT);
+ }
+
+ if (ioremap_page_range(v, v + size, p,
+ __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
+ if (likely(mem_init_done))
+ vfree(area->addr);
+ else
+ fixmaps_used -= (size >> PAGE_SHIFT);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + (char *)v);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(volatile void __iomem *addr)
+{
+ /* If the page is from the fixmap pool then we just clear out
+ * the fixmap mapping.
+ */
+ if (unlikely((unsigned long)addr >= FIXADDR_START)) {
+ /* This is a bit broken... we don't really know
+ * how big the area is so it's difficult to know
+ * how many fixed pages to invalidate...
+ * just flush tlb and hope for the best...
+ * consider this a FIXME
+ *
+ * Really we should be clearing out one or more page
+ * table entries for these virtual addresses so that
+ * future references cause a page fault... for now, we
+ * rely on two things:
+ * i) this code never gets called on known boards
+ * ii) invalid accesses to the freed areas aren't made
+ */
+ flush_tlb_all();
+ return;
+ }
+
+ vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+EXPORT_SYMBOL(iounmap);
+
+/*
+ * OK, this one's a bit tricky... ioremap can get called before memory is
+ * initialized (early serial console does this) and will want to alloc a page
+ * for its mapping. No userspace pages will ever get allocated before memory
+ * is initialized so this applies only to kernel pages. In the event that
+ * this is called before memory is initialized we allocate the page using
+ * the memblock infrastructure.
+ */
+
+pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte;
+
+ if (likely(mem_init_done)) {
+ pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+ } else {
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+ }
+
+ return pte;
+}
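ioremap() above accepts non-page-aligned physical addresses: it maps whole pages and then adds the sub-page offset back onto the returned pointer. A userspace sketch of just that bookkeeping follows; PAGE_SHIFT and the example device address are illustrative assumptions.

/* Userspace model of the alignment bookkeeping in ioremap(). */
#include <stdio.h>

#define PAGE_SHIFT    13
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x9000006cUL;	/* unaligned device register */
	unsigned long size = 0x40;

	unsigned long last_addr = addr + size - 1;
	unsigned long offset = addr & ~PAGE_MASK;	/* sub-page offset */
	unsigned long p = addr & PAGE_MASK;		/* first page to map */
	unsigned long map_size = PAGE_ALIGN(last_addr + 1) - p;

	/* the kernel returns (void __iomem *)(offset + v) */
	printf("map %#lx..%#lx (%lu bytes), return base + %#lx\n",
	       p, p + map_size - 1, map_size, offset);
	return 0;
}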
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
new file mode 100644
index 000000000..e2f2a3c3b
--- /dev/null
+++ b/arch/openrisc/mm/tlb.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC tlb.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/spr_defs.h>
+
+#define NO_CONTEXT -1
+
+#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
+ SPR_DMMUCFGR_NTS_OFF))
+#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
+ SPR_IMMUCFGR_NTS_OFF))
+#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
+#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
+/*
+ * Invalidate all TLB entries.
+ *
+ * This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
+ * Easiest way to accomplish this is to just zero out the xTLBMR register
+ * completely.
+ *
+ */
+
+void local_flush_tlb_all(void)
+{
+ int i;
+ unsigned long num_tlb_sets;
+
+ /* Determine number of sets for IMMU. */
+ /* FIXME: Assumption is I & D nsets equal. */
+ num_tlb_sets = NUM_ITLB_SETS;
+
+ for (i = 0; i < num_tlb_sets; i++) {
+ mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
+ mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
+ }
+}
+
+#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
+#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
+
+/*
+ * Invalidate a single page. This is what the xTLBEIR register is for.
+ *
+ * There's no point in checking the vma for PAGE_EXEC to determine whether it's
+ * the data or instruction TLB that should be flushed... that would take more
+ * than the few instructions that the following compiles down to!
+ *
+ * The case where we don't have the xTLBEIR register really only works for
+ * MMU's with a single way and is hard-coded that way.
+ */
+
+#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
+#define flush_dtlb_page_no_eir(addr) \
+ mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)
+
+#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
+#define flush_itlb_page_no_eir(addr) \
+ mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (have_dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (have_itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ bool dtlbeir;
+ bool itlbeir;
+
+ dtlbeir = have_dtlbeir;
+ itlbeir = have_itlbeir;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ if (dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+ }
+}
+
+/*
+ * Invalidate the selected mm context only.
+ *
+ * FIXME: Due to some bug here, we're flushing everything for now.
+ * This should be changed to loop over the mm and call flush_tlb_range.
+ */
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Was seeing bugs with the mm struct passed to us.
+ * Scrapped most of this function.
+ * Several architectures simply flush everything here.
+ */
+ local_flush_tlb_all();
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *next_tsk)
+{
+ unsigned int cpu;
+
+ if (unlikely(prev == next))
+ return;
+
+ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* remember the pgd for the fault handlers
+ * this is similar to the pgd register in some other CPUs.
+ * we need our own copy of it because current and active_mm
+ * might be invalid at points where we still need to
+ * dereference the pgd.
+ */
+ current_pgd[cpu] = next->pgd;
+
+ /* We don't have context support implemented, so flush all
+ * entries belonging to previous map
+ */
+ local_flush_tlb_mm(prev);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. this is only called when the last user of the mm
+ * drops it.
+ */
+
+void destroy_context(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init tlb_init(void)
+{
+ /* Do nothing... */
+ /* invalidate the entire TLB */
+ /* flush_tlb_all(); */
+}