Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--  arch/xtensa/mm/Makefile       14
-rw-r--r--  arch/xtensa/mm/cache.c       333
-rw-r--r--  arch/xtensa/mm/fault.c       256
-rw-r--r--  arch/xtensa/mm/highmem.c      59
-rw-r--r--  arch/xtensa/mm/init.c        240
-rw-r--r--  arch/xtensa/mm/ioremap.c      35
-rw-r--r--  arch/xtensa/mm/kasan_init.c  102
-rw-r--r--  arch/xtensa/mm/misc.S        463
-rw-r--r--  arch/xtensa/mm/mmu.c         119
-rw-r--r--  arch/xtensa/mm/tlb.c         294
10 files changed, 1915 insertions, 0 deletions
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 000000000..44153a335
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Linux/Xtensa-specific parts of the memory manager.
+#
+
+obj-y := init.o misc.o
+obj-$(CONFIG_PFAULT) += fault.o
+obj-$(CONFIG_MMU) += cache.o ioremap.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 000000000..7ec66a79f
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,333 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/pgtable.h>
+
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we always have to flush the cache when pages are
+ * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
+ * page.
+ *
+ *
+ *
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+ unsigned long vaddr)
+{
+ if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ unsigned long kvaddr;
+
+ if (!PageHighMem(page)) {
+ kvaddr = (unsigned long)page_to_virt(page);
+
+ __invalidate_dcache_page(kvaddr);
+ } else {
+ kvaddr = TLBTEMP_BASE_1 +
+ (page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __invalidate_dcache_page_alias(kvaddr,
+ page_to_phys(page));
+ preempt_enable();
+ }
+ }
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+ unsigned long vaddr, unsigned long *paddr)
+{
+ *paddr = page_to_phys(page);
+ return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ unsigned long paddr;
+ void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+ preempt_disable();
+ kmap_invalidate_coherent(page, vaddr);
+ set_bit(PG_arch_1, &page->flags);
+ clear_page_alias(kvaddr, paddr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void copy_user_highpage(struct page *dst, struct page *src,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long dst_paddr, src_paddr;
+ void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+ &dst_paddr);
+ void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ &src_paddr);
+
+ preempt_disable();
+ kmap_invalidate_coherent(dst, vaddr);
+ set_bit(PG_arch_1, &dst->flags);
+ copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+/*
+ * Any time the kernel writes to a user page cache page, or is about to
+ * read from a page cache page, this routine is called.
+ *
+ */
+
+void flush_dcache_folio(struct folio *folio)
+{
+ struct address_space *mapping = folio_flush_mapping(folio);
+
+ /*
+ * If we have a mapping but the page is not mapped to user-space
+ * yet, we simply mark this page dirty and defer flushing the
+ * caches until update_mmu().
+ */
+
+ if (mapping && !mapping_mapped(mapping)) {
+ if (!test_bit(PG_arch_1, &folio->flags))
+ set_bit(PG_arch_1, &folio->flags);
+ return;
+
+ } else {
+ unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
+ unsigned long temp = folio_pos(folio);
+ unsigned int i, nr = folio_nr_pages(folio);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+ unsigned long virt;
+
+ /*
+ * Flush the page in kernel space and user space.
+ * Note that we can omit that step if aliasing is not
+ * an issue, but we do have to synchronize I$ and D$
+ * if we have a mapping.
+ */
+
+ if (!alias && !mapping)
+ return;
+
+ preempt_disable();
+ for (i = 0; i < nr; i++) {
+ virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+ virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+ if (alias)
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+ if (mapping)
+ __invalidate_icache_page_alias(virt, phys);
+ phys += PAGE_SIZE;
+ temp += PAGE_SIZE;
+ }
+ preempt_enable();
+ }
+
+ /* There shouldn't be an entry in the cache for this page anymore. */
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __flush_invalidate_dcache_all();
+ __invalidate_icache_all();
+}
+EXPORT_SYMBOL(local_flush_cache_range);
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned long pfn)
+{
+ /* Note that we have to use the 'alias' address to avoid multi-hit */
+
+ unsigned long phys = page_to_phys(pfn_to_page(pfn));
+ unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_cache_page);
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct folio *folio;
+ unsigned int i;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ folio = page_folio(pfn_to_page(pfn));
+
+ /* Invalidate old entries in TLBs */
+ for (i = 0; i < nr; i++)
+ flush_tlb_page(vma, addr + i * PAGE_SIZE);
+ nr = folio_nr_pages(folio);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+ if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+ unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
+ unsigned long tmp;
+
+ preempt_disable();
+ for (i = 0; i < nr; i++) {
+ tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
+ phys += PAGE_SIZE;
+ }
+ preempt_enable();
+
+ clear_bit(PG_arch_1, &folio->flags);
+ }
+#else
+ if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
+ && (vma->vm_flags & VM_EXEC) != 0) {
+ for (i = 0; i < nr; i++) {
+ void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
+ __flush_dcache_page((unsigned long)paddr);
+ __invalidate_icache_page((unsigned long)paddr);
+ kunmap_local(paddr);
+ }
+ set_bit(PG_arch_1, &folio->flags);
+ }
+#endif
+}
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /* Flush and invalidate user page if aliased. */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
+ }
+
+ /* Copy data */
+
+ memcpy(dst, src, len);
+
+ /*
+ * Flush and invalidate kernel page if aliased and synchronize
+ * data and instruction caches for executable pages.
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __flush_invalidate_dcache_range((unsigned long) dst, len);
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
+ preempt_enable();
+
+ } else if ((vma->vm_flags & VM_EXEC) != 0) {
+		__flush_dcache_range((unsigned long) dst, len);
+ __invalidate_icache_range((unsigned long) dst, len);
+ }
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /*
+ * Flush user page if aliased.
+ * (Note: a simple flush would be sufficient)
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
+ }
+
+ memcpy(dst, src, len);
+}
+
+#endif
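
The PG_arch_1 note at the top of cache.c above describes two different uses of the same page-flag bit, which is easy to lose track of when reading the flush routines. Below is a minimal userspace sketch of that convention, not kernel code: the model_page type and helper names are invented for illustration, and the real cache-maintenance work is reduced to comments.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct page/folio; only the PG_arch_1 bit is modelled. */
struct model_page {
	bool arch_1;
};

/*
 * D-cache aliasing case: flush_dcache_folio() cannot flush the user
 * alias before the page is mapped, so it only marks the page dirty and
 * update_mmu_cache_range() does the flush once the mapping exists.
 */
static void model_flush_dcache_unmapped(struct model_page *p)
{
	p->arch_1 = true;		/* flush still pending */
}

static void model_update_mmu_aliasing(struct model_page *p)
{
	if (p->arch_1) {
		/* ...flush/invalidate kernel and user aliases here... */
		p->arch_1 = false;	/* nothing pending anymore */
	}
}

/*
 * Non-aliasing case: the bit means "I-cache and D-cache are in sync";
 * update_mmu_cache_range() sets it after syncing an executable mapping,
 * so the sync is done at most once per page.
 */
static void model_update_mmu_no_alias(struct model_page *p, bool executable)
{
	if (!p->arch_1 && executable) {
		/* ...write back D-cache, invalidate I-cache here... */
		p->arch_1 = true;	/* page is now "clean" */
	}
}

int main(void)
{
	struct model_page p = { .arch_1 = false };

	model_flush_dcache_unmapped(&p);
	model_update_mmu_aliasing(&p);
	model_update_mmu_no_alias(&p, true);
	printf("PG_arch_1 set: %d\n", p.arch_1);
	return 0;
}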
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 000000000..16e11b6f6
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,256 @@
+// TODO VM_EXEC flag work-around, cache aliasing
+/*
+ * arch/xtensa/mm/fault.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2010 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/extable.h>
+#include <linux/hardirq.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/hardirq.h>
+#include <asm/traps.h>
+
+void bad_page_fault(struct pt_regs*, unsigned long, int);
+
+static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
+{
+#ifdef CONFIG_MMU
+ /* Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ struct mm_struct *act_mm = current->active_mm;
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ if (act_mm == NULL)
+ goto bad_page_fault;
+
+ pgd = act_mm->pgd + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto bad_page_fault;
+
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_page_fault;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_page_fault;
+
+ pmd_val(*pmd) = pmd_val(*pmd_k);
+ pte_k = pte_offset_kernel(pmd_k, address);
+
+ if (!pte_present(*pte_k))
+ goto bad_page_fault;
+ return;
+
+bad_page_fault:
+ bad_page_fault(regs, address, SIGKILL);
+#else
+ WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
+#endif
+}
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * Note: does not handle Miss and MultiHit.
+ */
+
+void do_page_fault(struct pt_regs *regs)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+ unsigned int exccause = regs->exccause;
+ unsigned int address = regs->excvaddr;
+ int code;
+
+ int is_write, is_exec;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+
+ code = SEGV_MAPERR;
+
+ /* We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ */
+ if (address >= TASK_SIZE && !user_mode(regs)) {
+ vmalloc_fault(regs, address);
+ return;
+ }
+
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+
+ is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
+ exccause == EXCCAUSE_ITLB_MISS ||
+ exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+
+ pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+ current->comm, current->pid,
+ address, exccause, regs->pc,
+ is_write ? "w" : "", is_exec ? "x" : "");
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (!vma)
+ goto bad_area_nosemaphore;
+
+ /* Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+ code = SEGV_ACCERR;
+
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ } else if (is_exec) {
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ } else /* Allow read even from write-only pages. */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+ goto bad_area;
+
+ /* If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGKILL);
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+
+ mmap_read_unlock(mm);
+ return;
+
+ /* Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ mmap_read_unlock(mm);
+bad_area_nosemaphore:
+ if (user_mode(regs)) {
+ force_sig_fault(SIGSEGV, code, (void *) address);
+ return;
+ }
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+
+
+ /* We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ mmap_read_unlock(mm);
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGKILL);
+ else
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ mmap_read_unlock(mm);
+
+ /* Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGBUS);
+ return;
+}
+
+
+void
+bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+ extern void __noreturn die(const char*, struct pt_regs*, long);
+ const struct exception_table_entry *entry;
+
+ /* Are we prepared to handle this kernel fault? */
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
+ regs->pc = entry->fixup;
+ return;
+ }
+
+ /* Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
+ die("Oops", regs, sig);
+}
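
vmalloc_fault() above exists because kernel (vmalloc) mappings are created only in init_mm.pgd, the "reference" page table, and are copied into a process's own page table lazily, the first time the process faults on that kernel virtual address. A toy model of that lazy synchronization follows; it is only a sketch with made-up names and a single flat table instead of the real pgd/p4d/pud/pmd walk.

#include <stdbool.h>
#include <stddef.h>

#define PTRS 1024				/* made-up table size */

static unsigned long reference_pgd[PTRS];	/* stands in for init_mm.pgd */

/* Copy the kernel entry into the faulting task's table, if it exists. */
static bool sync_kernel_entry(unsigned long *task_pgd, size_t index)
{
	if (!reference_pgd[index])
		return false;			/* genuinely bad access */

	task_pgd[index] = reference_pgd[index];	/* lazy copy, fault resolved */
	return true;
}

int main(void)
{
	unsigned long task_pgd[PTRS] = { 0 };

	reference_pgd[5] = 0x1234;		/* pretend a vmalloc mapping exists */
	return sync_kernel_entry(task_pgd, 5) ? 0 : 1;
}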
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 000000000..35c4f7d4a
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,59 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
+wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
+
+static void __init kmap_waitqueues_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
+ init_waitqueue_head(pkmap_map_wait_arr + i);
+}
+
+static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
+{
+ int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;
+
+ /*
+ * The fixmap operates top down, so the color offset needs to be
+	 * reversed as well.
+ */
+ return idx + DCACHE_N_COLORS - 1 - color;
+}
+
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
+{
+ return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
+}
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
+{
+ return kmap_idx(type, DCACHE_ALIAS(addr));
+}
+
+#else
+static inline void kmap_waitqueues_init(void) { }
+#endif
+
+void __init kmap_init(void)
+{
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ kmap_waitqueues_init();
+}
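
As a worked example of kmap_idx() above, assume a hypothetical configuration with DCACHE_N_COLORS = 4 and KM_MAX_IDX = 16, running on CPU 0. For kmap slot type 2 and a page whose cache color is 1:

    idx = (2 + 16 * 0) * 4         = 8
    kmap_idx(2, 1) = 8 + 4 - 1 - 1 = 10

so the four colors 3, 2, 1, 0 of slot type 2 land on fixmap indices 8, 9, 10, 11. The reversal matters because fixmap virtual addresses are handed out from the top of the fixmap area downwards, so increasing index means decreasing virtual address; reversing the color ordering keeps the virtual address used for the mapping at the same D-cache color as the page, assuming a suitably aligned fixmap top.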
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000..b2587a1a7
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,240 @@
+/*
+ * arch/xtensa/mm/init.c
+ *
+ * Derived from MIPS, PPC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier
+ * Kevin Chea
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/memblock.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+#include <linux/of_fdt.h>
+#include <linux/dma-map-ops.h>
+
+#include <asm/bootparam.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/sysmem.h>
+
+/*
+ * Initialize the bootmem system and give it all low memory we have available.
+ */
+
+void __init bootmem_init(void)
+{
+ /* Reserve all memory below PHYS_OFFSET, as memory
+ * accounting doesn't work for pages below that address.
+ *
+	 * If PHYS_OFFSET is zero, reserve the page at address 0:
+	 * successful allocations should never return NULL.
+ */
+ memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);
+
+ early_init_fdt_scan_reserved_mem();
+
+ if (!memblock_phys_mem_size())
+ panic("No memory found!\n");
+
+ min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+ min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(max_pfn, MAX_LOW_PFN);
+
+ early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+ (phys_addr_t)max_low_pfn << PAGE_SHIFT);
+
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ memblock_dump_all();
+}
+
+
+void __init zones_init(void)
+{
+ /* All pages are DMA-able, so we put them all in the DMA zone. */
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = {
+ [ZONE_NORMAL] = max_low_pfn,
+#ifdef CONFIG_HIGHMEM
+ [ZONE_HIGHMEM] = max_pfn,
+#endif
+ };
+ free_area_init(max_zone_pfn);
+}
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long max_low = max_low_pfn;
+ phys_addr_t range_start, range_end;
+ u64 i;
+
+ /* set highmem page free */
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &range_start, &range_end, NULL) {
+ unsigned long start = PFN_UP(range_start);
+ unsigned long end = PFN_DOWN(range_end);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ for (; start < end; start++)
+ free_highmem_page(pfn_to_page(start));
+ }
+#endif
+}
+
+/*
+ * Initialize memory pages.
+ */
+
+void __init mem_init(void)
+{
+ free_highpages();
+
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
+ memblock_free_all();
+
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+ KASAN_SHADOW_SIZE >> 20,
+#endif
+#ifdef CONFIG_MMU
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_END,
+ (FIXADDR_END - FIXADDR_START) >> 10,
+#endif
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+#else
+ min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
+#endif
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+ (unsigned long)_text, (unsigned long)_etext,
+ (unsigned long)(_etext - _text) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)__end_rodata,
+ (unsigned long)(__end_rodata - __start_rodata) >> 10,
+ (unsigned long)_sdata, (unsigned long)_edata,
+ (unsigned long)(_edata - _sdata) >> 10,
+ (unsigned long)__init_begin, (unsigned long)__init_end,
+ (unsigned long)(__init_end - __init_begin) >> 10,
+ (unsigned long)__bss_start, (unsigned long)__bss_stop,
+ (unsigned long)(__bss_stop - __bss_start) >> 10);
+}
+
+static void __init parse_memmap_one(char *p)
+{
+ char *oldp;
+ unsigned long start_at, mem_size;
+
+ if (!p)
+ return;
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
+
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ memblock_add(start_at, mem_size);
+ break;
+
+ case '$':
+ start_at = memparse(p + 1, &p);
+ memblock_reserve(start_at, mem_size);
+ break;
+
+ case 0:
+ memblock_reserve(mem_size, -mem_size);
+ break;
+
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
+ }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+
+ if (k)
+ *k++ = 0;
+
+ parse_memmap_one(str);
+ str = k;
+ }
+
+ return 0;
+}
+early_param("memmap", parse_memmap_opt);
+
+#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+#endif
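
parse_memmap_one() above implements the usual memmap= early parameter. A few examples of what each branch does (all addresses and sizes here are made up):

    memmap=64M@0x80000000   # '@'  -> memblock_add(0x80000000, 64M): add a RAM region
    memmap=16M$0x83f00000   # '$'  -> memblock_reserve(0x83f00000, 16M): reserve a region
    memmap=128M             # bare -> memblock_reserve(128M, -128M): reserve everything
                            #         above 128M, i.e. cap usable memory at 128M

Several entries can be combined with commas, e.g. memmap=64M@0x80000000,1M$0x83f00000; parse_memmap_opt() splits the string at the commas and hands each piece to parse_memmap_one().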
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
new file mode 100644
index 000000000..8ca660b7a
--- /dev/null
+++ b/arch/xtensa/mm/ioremap.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#include <linux/io.h>
+#include <linux/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+	unsigned long pfn = __phys_to_pfn(phys_addr);
+ WARN_ON(pfn_valid(pfn));
+
+ return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ unsigned long va = (unsigned long) addr;
+
+ if ((va >= XCHAL_KIO_CACHED_VADDR &&
+ va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
+ (va >= XCHAL_KIO_BYPASS_VADDR &&
+ va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+ return;
+
+ generic_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000..f00d122aa
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,102 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/memblock.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+
+void __init kasan_early_init(void)
+{
+ unsigned long vaddr = KASAN_SHADOW_START;
+ pmd_t *pmd = pmd_off_k(vaddr);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ PAGE_KERNEL));
+
+ for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
+ }
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long n_pages = (end - start) / PAGE_SIZE;
+ unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+ unsigned long i, j;
+ unsigned long vaddr = (unsigned long)start;
+ pmd_t *pmd = pmd_off_k(vaddr);
+ pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ pr_debug("%s: %p - %p\n", __func__, start, end);
+
+ for (i = j = 0; i < n_pmds; ++i) {
+ int k;
+
+ for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+ phys_addr_t phys =
+ memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
+ 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
+
+ if (!phys)
+ panic("Failed to allocate page table page\n");
+
+ set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+ }
+
+ for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+ set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ int i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+ (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+ BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+ /*
+ * Replace shadow map pages that cover addresses from VMALLOC area
+ * start to the end of KSEG with clean writable pages.
+ */
+ populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+ /*
+ * Write protect kasan_early_shadow_page and zero-initialize it again.
+ */
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ PAGE_KERNEL_RO));
+
+ local_flush_tlb_all();
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+ /* At this point kasan is fully initialized. Enable error messages. */
+ current->kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 000000000..ec36f73c4
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,463 @@
+/*
+ * arch/xtensa/mm/misc.S
+ *
+ * Miscellaneous assembly functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
+
+
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ * a2
+ */
+
+ENTRY(clear_page)
+
+ abi_entry_default
+
+ movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ __endla a2, a7, 32
+
+ abi_ret_default
+
+ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
+
+/*
+ * copy_page and copy_user_page are the same for non-cache-aliased configs.
+ *
+ * copy_page (void *to, void *from)
+ * a2 a3
+ */
+
+ENTRY(copy_page)
+
+ abi_entry_default
+
+ __loopi a2, a4, PAGE_SIZE, 32
+
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
+
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
+
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
+
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
+
+ addi a2, a2, 32
+ addi a3, a3, 32
+
+ __endl a2, a4
+
+ abi_ret_default
+
+ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
+
+#ifdef CONFIG_MMU
+/*
+ * If we have to deal with cache aliasing, we use temporary memory mappings
+ * to ensure that the source and destination pages have the same color as
+ * the virtual address. We use ways 0 and 1 for temporary mappings in such cases.
+ *
+ * The temporary DTLB entries shouldn't be flushed by interrupts, but are
+ * flushed by preemptive task switches. Special code in the
+ * fast_second_level_miss handler re-establishes the temporary mapping.
+ * It requires that the PPNs for the destination and source addresses are
+ * in a6, and a7, respectively.
+ */
+
+/* TLB miss exceptions are treated specially in the following region */
+
+ENTRY(__tlbtemp_mapping_start)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * clear_page_alias(void *addr, unsigned long paddr)
+ * a2 a3
+ */
+
+ENTRY(clear_page_alias)
+
+ abi_entry_default
+
+ movi a5, PAGE_OFFSET
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ __endla a2, a7, 32
+
+ /* We need to invalidate the temporary dtlb entry. */
+
+ idtlb a4
+ dsync
+
+ abi_ret_default
+
+ENDPROC(clear_page_alias)
+
+/*
+ * copy_page_alias(void *to, void *from,
+ * a2 a3
+ * unsigned long to_paddr, unsigned long from_paddr)
+ * a4 a5
+ */
+
+ENTRY(copy_page_alias)
+
+ abi_entry_default
+
+ /* Setup a temporary DTLB for destination. */
+
+ addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ wdtlb a6, a2
+ dsync
+
+ /* Setup a temporary DTLB for source. */
+
+ addi a7, a5, PAGE_KERNEL
+ addi a8, a3, 1 # way1
+
+ wdtlb a7, a8
+ dsync
+
+1: __loopi a2, a4, PAGE_SIZE, 32
+
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
+
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
+
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
+
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
+
+ addi a2, a2, 32
+ addi a3, a3, 32
+
+ __endl a2, a4
+
+ /* We need to invalidate any temporary mapping! */
+
+ addi a2, a2, -PAGE_SIZE
+ idtlb a2
+ dsync
+
+ addi a3, a3, -PAGE_SIZE+1
+ idtlb a3
+ dsync
+
+ abi_ret_default
+
+ENDPROC(copy_page_alias)
+
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * void __flush_invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
+ */
+
+ENTRY(__flush_invalidate_dcache_page_alias)
+
+ abi_entry_default
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___flush_invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__flush_invalidate_dcache_page_alias)
+
+/*
+ * void __invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
+ */
+
+ENTRY(__invalidate_dcache_page_alias)
+
+ abi_entry_default
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_dcache_page_alias)
+#endif
+
+ENTRY(__tlbtemp_mapping_itlb)
+
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+
+ENTRY(__invalidate_icache_page_alias)
+
+ abi_entry_default
+
+ addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
+ mov a4, a2
+ witlb a6, a2
+ isync
+
+ ___invalidate_icache_page a2 a3
+
+ iitlb a4
+ isync
+ abi_ret_default
+
+ENDPROC(__invalidate_icache_page_alias)
+
+#endif
+
+/* End of special treatment in tlb miss exception */
+
+ENTRY(__tlbtemp_mapping_end)
+
+#endif /* CONFIG_MMU */
+
+/*
+ * void __invalidate_icache_page(ulong start)
+ */
+
+ENTRY(__invalidate_icache_page)
+
+ abi_entry_default
+
+ ___invalidate_icache_page a2 a3
+ isync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_icache_page)
+
+/*
+ * void __invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__invalidate_dcache_page)
+
+ abi_entry_default
+
+ ___invalidate_dcache_page a2 a3
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_dcache_page)
+
+/*
+ * void __flush_invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_invalidate_dcache_page)
+
+ abi_entry_default
+
+ ___flush_invalidate_dcache_page a2 a3
+
+ dsync
+ abi_ret_default
+
+ENDPROC(__flush_invalidate_dcache_page)
+
+/*
+ * void __flush_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_dcache_page)
+
+ abi_entry_default
+
+ ___flush_dcache_page a2 a3
+
+ dsync
+ abi_ret_default
+
+ENDPROC(__flush_dcache_page)
+
+/*
+ * void __invalidate_icache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_icache_range)
+
+ abi_entry_default
+
+ ___invalidate_icache_range a2 a3 a4
+ isync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_icache_range)
+EXPORT_SYMBOL(__invalidate_icache_range)
+
+/*
+ * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_invalidate_dcache_range)
+
+ abi_entry_default
+
+ ___flush_invalidate_dcache_range a2 a3 a4
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__flush_invalidate_dcache_range)
+
+/*
+ * void __flush_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_dcache_range)
+
+ abi_entry_default
+
+ ___flush_dcache_range a2 a3 a4
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__flush_dcache_range)
+EXPORT_SYMBOL(__flush_dcache_range)
+
+/*
+ * void __invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_dcache_range)
+
+ abi_entry_default
+
+ ___invalidate_dcache_range a2 a3 a4
+
+ abi_ret_default
+
+ENDPROC(__invalidate_dcache_range)
+EXPORT_SYMBOL(__invalidate_dcache_range)
+
+/*
+ * void __invalidate_icache_all(void)
+ */
+
+ENTRY(__invalidate_icache_all)
+
+ abi_entry_default
+
+ ___invalidate_icache_all a2 a3
+ isync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_icache_all)
+
+/*
+ * void __flush_invalidate_dcache_all(void)
+ */
+
+ENTRY(__flush_invalidate_dcache_all)
+
+ abi_entry_default
+
+ ___flush_invalidate_dcache_all a2 a3
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__flush_invalidate_dcache_all)
+
+/*
+ * void __invalidate_dcache_all(void)
+ */
+
+ENTRY(__invalidate_dcache_all)
+
+ abi_entry_default
+
+ ___invalidate_dcache_all a2 a3
+ dsync
+
+ abi_ret_default
+
+ENDPROC(__invalidate_dcache_all)
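
The comment ahead of the __tlbtemp_mapping_start region explains why the alias routines above (clear_page_alias, copy_page_alias and the *_page_alias cache ops) build a temporary DTLB/ITLB mapping instead of using the normal kernel mapping of the page. The core of the trick is choosing a temporary virtual address with the same D-cache color as the address the page is mapped at in user space. A minimal C sketch follows; the SK_* constants are made-up stand-ins for the real configuration values (DCACHE_ALIAS_MASK, TLBTEMP_BASE_1) from the Xtensa headers.

#include <stdint.h>

/* Made-up stand-ins for the Xtensa configuration values. */
#define SK_PAGE_SIZE		0x1000UL	/* assume 4 KiB pages */
#define SK_DCACHE_WAY_SIZE	0x4000UL	/* assume 16 KiB per D-cache way */
#define SK_DCACHE_ALIAS_MASK	(SK_DCACHE_WAY_SIZE - SK_PAGE_SIZE)
#define SK_TLBTEMP_BASE		0xc8000000UL	/* made-up temporary window */

/*
 * Pick a temporary kernel virtual address whose cache color matches the
 * user virtual address the page belongs to, so that the temporary
 * mapping and the user mapping index the same D-cache lines and no
 * stale alias is left behind after the copy or flush.
 */
static inline uintptr_t tlbtemp_vaddr(uintptr_t user_vaddr)
{
	return SK_TLBTEMP_BASE + (user_vaddr & SK_DCACHE_ALIAS_MASK);
}

int main(void)
{
	/* 0x42000 and the chosen temporary address share color bits 0x2000. */
	return tlbtemp_vaddr(0x00042000UL) == SK_TLBTEMP_BASE + 0x2000UL ? 0 : 1;
}

This is the same computation done by coherent_kvaddr() in cache.c above; clear_page_alias() and copy_page_alias() then install the temporary TLB entries for that address (ways 0 and 1), do the work, and invalidate the entries again.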
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 000000000..92e158c69
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xtensa mmu stuff
+ *
+ * Extracted from init.c
+ */
+#include <linux/memblock.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/initialize_mmu.h>
+#include <asm/io.h>
+
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
+
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+ pte_t *pte;
+ unsigned long i;
+
+ n_pages = ALIGN(n_pages, PTRS_PER_PTE);
+
+ pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
+ __func__, vaddr, n_pages);
+
+ pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ for (i = 0; i < n_pages; ++i)
+ pte_clear(NULL, 0, pte + i);
+
+ for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
+ pte_t *cur_pte = pte + i;
+
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
+ BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
+ __func__, pmd, cur_pte);
+ }
+ return pte;
+}
+
+static void __init fixedrange_init(void)
+{
+ BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ init_pmd(FIXADDR_START, __end_of_fixed_addresses);
+}
+#endif
+
+void __init paging_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
+ kmap_init();
+#endif
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+void init_mmu(void)
+{
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+	 * registers ensures that valid values exist in these registers.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
+ * fields, the corresponding TLB must be flushed.
+ */
+ set_itlbcfg_register(0);
+ set_dtlbcfg_register(0);
+#endif
+ init_kio();
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
+ /*
+ * Update the IO area mapping in case xtensa_kio_paddr has changed
+ */
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+#endif
+}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 000000000..4f974b748
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,294 @@
+/*
+ * arch/xtensa/mm/tlb.c
+ *
+ * Logic that manipulates the Xtensa MMU. Derived from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2003 Tensilica Inc.
+ *
+ * Joe Taylor
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier
+ */
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+
+static inline void __flush_itlb_all (void)
+{
+ int w, i;
+
+ for (w = 0; w < ITLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_itlb_entry_no_isync(e);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+static inline void __flush_dtlb_all (void)
+{
+ int w, i;
+
+ for (w = 0; w < DTLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_dtlb_entry_no_isync(e);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+
+void local_flush_tlb_all(void)
+{
+ __flush_itlb_all();
+ __flush_dtlb_all();
+}
+
+/* If mm is current, we simply assign the current task a new ASID, thus
+ * invalidating all previous TLB entries. If mm is someone else's user mapping,
+ * we invalidate the context so that, when that user mapping is swapped in,
+ * a new context will be assigned to it.
+ */
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+
+ if (mm == current->active_mm) {
+ unsigned long flags;
+ local_irq_save(flags);
+ mm->context.asid[cpu] = NO_CONTEXT;
+ activate_context(mm, cpu);
+ local_irq_restore(flags);
+ } else {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ mm->context.cpu = -1;
+ }
+}
+
+
+#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
+#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
+#if _ITLB_ENTRIES > _DTLB_ENTRIES
+# define _TLB_ENTRIES _ITLB_ENTRIES
+#else
+# define _TLB_ENTRIES _DTLB_ENTRIES
+#endif
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ int cpu = smp_processor_id();
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long flags;
+
+ if (mm->context.asid[cpu] == NO_CONTEXT)
+ return;
+
+ pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context.asid[cpu], start, end);
+ local_irq_save(flags);
+
+ if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
+ int oldpid = get_rasid_register();
+
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ start &= PAGE_MASK;
+ if (vma->vm_flags & VM_EXEC)
+ while(start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ else
+ while(start < end) {
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+
+ set_rasid_register(oldpid);
+ } else {
+ local_flush_tlb_mm(mm);
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+ struct mm_struct* mm = vma->vm_mm;
+ unsigned long flags;
+ int oldpid;
+
+ if (mm->context.asid[cpu] == NO_CONTEXT)
+ return;
+
+ local_irq_save(flags);
+
+ oldpid = get_rasid_register();
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+
+ if (vma->vm_flags & VM_EXEC)
+ invalidate_itlb_mapping(page);
+ invalidate_dtlb_mapping(page);
+
+ set_rasid_register(oldpid);
+
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ local_flush_tlb_all();
+ }
+}
+
+void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ local_flush_tlb_page(vma, address);
+}
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+ struct task_struct *task = get_current();
+ struct mm_struct *mm = task->mm;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned int pteval;
+
+ if (!mm)
+ mm = task->active_mm;
+ pgd = pgd_offset(mm, vaddr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ p4d = p4d_offset(pgd, vaddr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, vaddr);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+ pmd = pmd_offset(pud, vaddr);
+ if (pmd_none_or_clear_bad(pmd))
+ return 0;
+ pte = pte_offset_map(pmd, vaddr);
+ if (!pte)
+ return 0;
+ pteval = pte_val(*pte);
+ pte_unmap(pte);
+ return pteval;
+}
+
+enum {
+ TLB_SUSPICIOUS = 1,
+ TLB_INSANE = 2,
+};
+
+static void tlb_insane(void)
+{
+ BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+ WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or PTE is
+ * marked as non-present. A non-present PTE for a page with non-zero refcount
+ * and zero mapcount is normal for a batched TLB flush operation. Zero refcount
+ * means that the page was freed prematurely. Non-zero mapcount is unusual,
+ * but does not necessarily mean an error, and is thus marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+ unsigned tlbidx = w | (e << PAGE_SHIFT);
+ unsigned r0 = dtlb ?
+ read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+ unsigned r1 = dtlb ?
+ read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
+ unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ unsigned pte = get_pte_for_vaddr(vpn);
+ unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+ unsigned tlb_asid = r0 & ASID_MASK;
+ bool kernel = tlb_asid == 1;
+ int rc = 0;
+
+ if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+ pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+ dtlb ? 'D' : 'I', w, e, vpn,
+ kernel ? "kernel" : "user");
+ rc |= TLB_INSANE;
+ }
+
+ if (tlb_asid == mm_asid) {
+ if ((pte ^ r1) & PAGE_MASK) {
+ pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+ if (pte == 0 || !pte_present(__pte(pte))) {
+ struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+ pr_err("page refcount: %d, mapcount: %d\n",
+ page_count(p),
+ page_mapcount(p));
+ if (!page_count(p))
+ rc |= TLB_INSANE;
+ else if (page_mapcount(p))
+ rc |= TLB_SUSPICIOUS;
+ } else {
+ rc |= TLB_INSANE;
+ }
+ }
+ }
+ return rc;
+}
+
+void check_tlb_sanity(void)
+{
+ unsigned long flags;
+ unsigned w, e;
+ int bug = 0;
+
+ local_irq_save(flags);
+ for (w = 0; w < DTLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, true);
+ for (w = 0; w < ITLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, false);
+ if (bug & TLB_INSANE)
+ tlb_insane();
+ if (bug & TLB_SUSPICIOUS)
+ tlb_suspicious();
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */