author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /arch/hexagon/mm
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/hexagon/mm')
-rw-r--r--  arch/hexagon/mm/Makefile                7
-rw-r--r--  arch/hexagon/mm/cache.c               126
-rw-r--r--  arch/hexagon/mm/copy_from_user.S      101
-rw-r--r--  arch/hexagon/mm/copy_to_user.S         79
-rw-r--r--  arch/hexagon/mm/copy_user_template.S  172
-rw-r--r--  arch/hexagon/mm/init.c                278
-rw-r--r--  arch/hexagon/mm/ioremap.c              44
-rw-r--r--  arch/hexagon/mm/uaccess.c              45
-rw-r--r--  arch/hexagon/mm/vm_fault.c            173
-rw-r--r--  arch/hexagon/mm/vm_tlb.c               81
10 files changed, 1106 insertions, 0 deletions
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
new file mode 100644
index 000000000..49911a906
--- /dev/null
+++ b/arch/hexagon/mm/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for Hexagon memory management subsystem
+#
+
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y += copy_to_user.o copy_from_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
new file mode 100644
index 000000000..7e46f40c8
--- /dev/null
+++ b/arch/hexagon/mm/cache.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cache management functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/hexagon_vm.h>
+
+#define spanlines(start, end) \
+ (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleaninva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleana(%0); "
+ " icinva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ __asm__ __volatile__ (
+ "isync"
+ );
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleana(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dcinva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+
+
+
+/*
+ * This is just really brutal and shouldn't be used anyway,
+ * especially on V2. Left here just in case.
+ */
+void flush_cache_all_hexagon(void)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ __vmcache_ickill();
+ __vmcache_dckill();
+ __vmcache_l2kill();
+ local_irq_restore(flags);
+ mb();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ flush_icache_range((unsigned long) dst,
+ (unsigned long) dst + len);
+ }
+}
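
The spanlines() macro above is easy to sanity-check in isolation. A minimal
stand-alone sketch (the LINESIZE/LINEBITS values are assumptions for
illustration; note that the callers above pass an inclusive end, i.e. end-1):

	#include <stdio.h>

	#define LINESIZE 32	/* assumed cache line size, for illustration */
	#define LINEBITS 5	/* log2(LINESIZE) */

	/* Same arithmetic as the kernel macro: lines touched by [start, end]. */
	#define spanlines(start, end) \
		(((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)

	int main(void)
	{
		/* A 3-byte range straddling a line boundary spans two lines. */
		printf("%lu\n", spanlines(0x101eUL, 0x1020UL));	/* prints 2 */
		/* A range inside a single line spans exactly one. */
		printf("%lu\n", spanlines(0x1000UL, 0x101fUL));	/* prints 1 */
		return 0;
	}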
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 000000000..1a49bf24f
--- /dev/null
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * User memory copy functions for kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way only speeds up the case where src/dest have similar alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function and handle faulting addresses
+ * differently for each variant.
+ */
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_from_user
+#include "copy_user_template.S"
+
+ /* LOAD FAULTS from COPY_FROM_USER */
+
+ /* Alignment loop. r2 has been updated. Return it. */
+ .falign
+1009:
+2009:
+4009:
+ {
+ r0 = r2
+ jumpr r31
+ }
+ /* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
+ /* X - (A - B) == X + B - A */
+ .falign
+8089:
+ {
+ memd(dst) = d_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+4089:
+ {
+ memw(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+2089:
+ {
+ memh(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+1089:
+ {
+ memb(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+
+ /* COPY FROM USER: only loads can fail */
+
+ .section __ex_table,"a"
+ .long 1000b,1009b
+ .long 2000b,2009b
+ .long 4000b,4009b
+ .long 8080b,8089b
+ .long 4080b,4089b
+ .long 2080b,2089b
+ .long 1080b,1089b
+ .previous
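
A worked instance of the "X - (A - B) == X + B - A" identity used in the
epilogs above, with illustrative numbers: if X = r2 = 40 bytes were
outstanding and the faulting access had advanced src 8 bytes past the saved
checkpoint (A - B = src - src_sav = 8), the uncopied-byte count is
40 - 8 = 32. The machine provides sub(src_sav,src) = B - A directly, so
"r2 += sub(src_sav,src)" computes 40 + (-8) = 32 without first needing the
src - src_sav difference in a register.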
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
new file mode 100644
index 000000000..ed8e3cafb
--- /dev/null
+++ b/arch/hexagon/mm/copy_to_user.S
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * User memory copying routines for the Hexagon Kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/* The right way to do this involves valignb.
+ * The easy way only speeds up the case where src/dest have similar alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function and handle faulting addresses
+ * differently for each variant.
+ */
+
+/*
+ * copy to user: stores can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_to_user
+#include "copy_user_template.S"
+
+ /* STORE FAULTS from COPY_TO_USER */
+ .falign
+1109:
+2109:
+4109:
+ /* Alignment loop. r2 has been updated. Return it. */
+ {
+ r0 = r2
+ jumpr r31
+ }
+ /* Normal copy loops. Use dst-dst_sav to compute distance */
+ /* dst holds best write, no need to unwind any loops */
+ /* X - (A - B) == X + B - A */
+ .falign
+8189:
+8199:
+4189:
+4199:
+2189:
+2199:
+1189:
+1199:
+ {
+ r2 += sub(dst_sav,dst)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+
+ /* COPY TO USER: only stores can fail */
+ .section __ex_table,"a"
+ .long 1100b,1109b
+ .long 2100b,2109b
+ .long 4100b,4109b
+ .long 8180b,8189b
+ .long 8190b,8199b
+ .long 4180b,4189b
+ .long 4190b,4199b
+ .long 2180b,2189b
+ .long 2190b,2199b
+ .long 1180b,1189b
+ .long 1190b,1199b
+ .previous
diff --git a/arch/hexagon/mm/copy_user_template.S b/arch/hexagon/mm/copy_user_template.S
new file mode 100644
index 000000000..d297df01b
--- /dev/null
+++ b/arch/hexagon/mm/copy_user_template.S
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/* Numerology:
+ * WXYZ
+ * W: width in bytes
+ * X: Load=0, Store=1
+ * Y: location: 0=preamble, 8=loop, 9=epilog
+ * Z: 0=fault site, 9=fault handler
+ */
+ .text
+ .global FUNCNAME
+ .type FUNCNAME, @function
+ .p2align 5
+FUNCNAME:
+ {
+ p0 = cmp.gtu(bytes,#0)
+ if (!p0.new) jump:nt .Ldone
+ r3 = or(dst,src)
+ r4 = xor(dst,src)
+ }
+ {
+ p1 = cmp.gtu(bytes,#15)
+ p0 = bitsclr(r3,#7)
+ if (!p0.new) jump:nt .Loop_not_aligned_8
+ src_dst_sav = combine(src,dst)
+ }
+
+ {
+ loopcount = lsr(bytes,#3)
+ if (!p1) jump .Lsmall
+ }
+ p3=sp1loop0(.Loop8,loopcount)
+.Loop8:
+8080:
+8180:
+ {
+ if (p3) memd(dst++#8) = d_dbuf
+ d_dbuf = memd(src++#8)
+ }:endloop0
+8190:
+ {
+ memd(dst++#8) = d_dbuf
+ bytes -= asl(loopcount,#3)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned_8:
+ {
+ p0 = bitsclr(r4,#7)
+ if (p0.new) jump:nt .Lalign
+ }
+ {
+ p0 = bitsclr(r3,#3)
+ if (!p0.new) jump:nt .Loop_not_aligned_4
+ p1 = cmp.gtu(bytes,#7)
+ }
+
+ {
+ if (!p1) jump .Lsmall
+ loopcount = lsr(bytes,#2)
+ }
+ p3=sp1loop0(.Loop4,loopcount)
+.Loop4:
+4080:
+4180:
+ {
+ if (p3) memw(dst++#4) = w_dbuf
+ w_dbuf = memw(src++#4)
+ }:endloop0
+4190:
+ {
+ memw(dst++#4) = w_dbuf
+ bytes -= asl(loopcount,#2)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned_4:
+ {
+ p0 = bitsclr(r3,#1)
+ if (!p0.new) jump:nt .Loop_not_aligned
+ p1 = cmp.gtu(bytes,#3)
+ }
+
+ {
+ if (!p1) jump .Lsmall
+ loopcount = lsr(bytes,#1)
+ }
+ p3=sp1loop0(.Loop2,loopcount)
+.Loop2:
+2080:
+2180:
+ {
+ if (p3) memh(dst++#2) = w_dbuf
+ w_dbuf = memuh(src++#2)
+ }:endloop0
+2190:
+ {
+ memh(dst++#2) = w_dbuf
+ bytes -= asl(loopcount,#1)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned: /* Works for as small as one byte */
+ p3=sp1loop0(.Loop1,bytes)
+.Loop1:
+1080:
+1180:
+ {
+ if (p3) memb(dst++#1) = w_dbuf
+ w_dbuf = memub(src++#1)
+ }:endloop0
+ /* Done */
+1190:
+ {
+ memb(dst) = w_dbuf
+ jumpr r31
+ r0 = #0
+ }
+
+.Lsmall:
+ {
+ p0 = cmp.gtu(bytes,#0)
+ if (p0.new) jump:nt .Loop_not_aligned
+ }
+.Ldone:
+ {
+ r0 = #0
+ jumpr r31
+ }
+ .falign
+.Lalign:
+1000:
+ {
+ if (p0.new) w_dbuf = memub(src)
+ p0 = tstbit(src,#0)
+ if (!p1) jump .Lsmall
+ }
+1100:
+ {
+ if (p0) memb(dst++#1) = w_dbuf
+ if (p0) bytes = add(bytes,#-1)
+ if (p0) src = add(src,#1)
+ }
+2000:
+ {
+ if (p0.new) w_dbuf = memuh(src)
+ p0 = tstbit(src,#1)
+ if (!p1) jump .Lsmall
+ }
+2100:
+ {
+ if (p0) memh(dst++#2) = w_dbuf
+ if (p0) bytes = add(bytes,#-2)
+ if (p0) src = add(src,#2)
+ }
+4000:
+ {
+ if (p0.new) w_dbuf = memw(src)
+ p0 = tstbit(src,#2)
+ if (!p1) jump .Lsmall
+ }
+4100:
+ {
+ if (p0) memw(dst++#4) = w_dbuf
+ if (p0) bytes = add(bytes,#-4)
+ if (p0) src = add(src,#4)
+ jump FUNCNAME
+ }
+ .size FUNCNAME,.-FUNCNAME
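
Decoding one label against the WXYZ numerology above: 4189 reads as W=4
(4-byte accesses), X=1 (store), Y=8 (main loop), Z=9 (fault handler), i.e.
the handler for a fault in the 4-byte store loop. Its fault site is the same
label with Z=0, namely 4180, and that is exactly the pairing the __ex_table
entries record (.long 4180b,4189b in copy_to_user.S above).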
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
new file mode 100644
index 000000000..146115c9d
--- /dev/null
+++ b/arch/hexagon/mm/init.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory subsystem initialization for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <asm/atomic.h>
+#include <linux/highmem.h>
+#include <asm/tlb.h>
+#include <asm/sections.h>
+#include <asm/vm_mmu.h>
+
+/*
+ * Define a startpg just past the end of the kernel image and a lastpg
+ * that corresponds to the end of real or simulated platform memory.
+ */
+#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
+
+unsigned long bootmem_lastpg; /* Should be set by platform code */
+unsigned long __phys_offset; /* physical kernel offset >> 12 */
+
+/* Set as variable to limit PMD copies */
+int max_kernel_seg = 0x303;
+
+/* indicate pfn's of high memory */
+unsigned long highstart_pfn, highend_pfn;
+
+/* Default cache attribute for newly created page tables */
+unsigned long _dflt_cache_att = CACHEDEF;
+
+/*
+ * The current "generation" of kernel map, which should not roll
+ * over until Hell freezes over. Actual bound in years needs to be
+ * calculated to confirm.
+ */
+DEFINE_SPINLOCK(kmap_gen_lock);
+
+/* checkpatch says don't init this to 0. */
+unsigned long long kmap_generation;
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Fixes up more stuff for HIGHMEM
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ /* No idea where this is actually declared. Seems to evade LXR. */
+ memblock_free_all();
+
+ /*
+ * To-Do: someone somewhere should wipe out the bootmem map
+ * after we're done?
+ */
+
+ /*
+ * This can be moved to some more virtual-memory-specific
+ * initialization hook at some point. Set the init_mm
+ * descriptors "context" value to point to the initial
+ * kernel segment table's physical address.
+ */
+ init_mm.context.ptbase = __pa(init_mm.pgd);
+}
+
+void sync_icache_dcache(pte_t pte)
+{
+ unsigned long addr;
+ struct page *page;
+
+ page = pte_page(pte);
+ addr = (unsigned long) page_address(page);
+
+ __vmcache_idsync(addr, PAGE_SIZE);
+}
+
+/*
+ * In order to set up page allocator "nodes",
+ * somebody has to call free_area_init() for UMA.
+ *
+ * In this mode, we only have one pg_data_t
+ * structure: contig_mem_data.
+ */
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
+
+ /*
+ * This is not particularly well documented anywhere, but
+ * give ZONE_NORMAL all the memory, including the big holes
+ * left by the kernel+bootmem_map which are already left as reserved
+ * in the bootmem_map; free_area_init should see those bits and
+ * adjust accordingly.
+ */
+
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */
+
+ /*
+ * Start of high memory area. Will probably need something more
+ * fancy if we... get more fancy.
+ */
+ high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
+}
+
+#ifndef DMA_RESERVE
+#define DMA_RESERVE (4)
+#endif
+
+#define DMA_CHUNKSIZE (1<<22)
+#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
+
+/*
+ * Pick out the memory size. We look for mem=size,
+ * where size is "size[KkMm]"
+ */
+static int __init early_mem(char *p)
+{
+ unsigned long size;
+ char *endp;
+
+ size = memparse(p, &endp);
+
+ bootmem_lastpg = PFN_DOWN(size);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
+size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
+
+void __init setup_arch_memory(void)
+{
+ /* XXX Todo: this probably should be cleaned up */
+ u32 *segtable = (u32 *) &swapper_pg_dir[0];
+ u32 *segtable_end;
+
+ /*
+ * Set up boot memory allocator
+ *
+ * The Gorman book also talks about these functions.
+ * This needs to change for highmem setups.
+ */
+
+ /* Prior to this, bootmem_lastpg is actually mem size */
+ bootmem_lastpg += ARCH_PFN_OFFSET;
+
+ /* Memory size needs to be a multiple of 16M */
+ bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
+ ~((BIG_KERNEL_PAGE_SIZE) - 1));
+
+ memblock_add(PHYS_OFFSET,
+ (bootmem_lastpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
+
+ /* Reserve kernel text/data/bss */
+ memblock_reserve(PHYS_OFFSET,
+ (bootmem_startpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
+ /*
+ * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
+ * memory allocation
+ */
+ max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
+ min_low_pfn = ARCH_PFN_OFFSET;
+ memblock_reserve(PFN_PHYS(max_low_pfn), DMA_RESERVED_BYTES);
+
+ printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
+ printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
+ printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
+ printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
+
+ /*
+ * The default VM page tables (will be) populated with
+ * VA=PA+PAGE_OFFSET mapping. We go in and invalidate entries
+ * higher than what we have memory for.
+ */
+
+ /* this is pointer arithmetic; each entry covers 4MB */
+ segtable = segtable + (PAGE_OFFSET >> 22);
+
+ /* this actually only goes to the end of the first gig */
+ segtable_end = segtable + (1<<(30-22));
+
+ /*
+ * Move forward to the start of empty pages; take into account
+ * phys_offset shift.
+ */
+
+ segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
+ {
+ int i;
+
+ for (i = 1 ; i <= DMA_RESERVE ; i++)
+ segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
+ | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
+ | __HEXAGON_C_UNC << 6
+ | __HVM_PDE_S_4MB);
+ }
+
+ printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
+ segtable_end);
+ while (segtable < (segtable_end-8))
+ *(segtable++) = __HVM_PDE_S_INVALID;
+ /* stop the pointer at the device I/O 4MB page */
+
+ printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
+ segtable);
+
+#if 0
+ /* Other half of the early device table from vm_init_segtable. */
+ printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
+ (unsigned long) _K_init_devicetable-PAGE_OFFSET);
+ *segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
+ __HVM_PDE_S_4KB;
+ printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
+#endif
+
+ /*
+ * The bootmem allocator seemingly just lives to feed memory
+ * to the paging system
+ */
+ printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
+ paging_init(); /* See Gorman Book, 2.3 */
+
+ /*
+ * At this point, the page allocator is kind of initialized, but
+ * apparently no pages are available (just like with the bootmem
+ * allocator), and need to be freed themselves via mem_init(),
+ * which is called by start_kernel() later on in the process
+ */
+}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_WRITE | CACHEDEF),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_EXECUTE |
+ _PAGE_WRITE | CACHEDEF)
+};
+DECLARE_VM_GET_PAGE_PROT
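
The segment-table pointer arithmetic in setup_arch_memory() above (each entry
covers 4MB, hence the >> 22 shifts) can be checked with a small user-space
sketch; the PAGE_OFFSET value here is an assumption for illustration, not
necessarily the configured one:

	#include <stdio.h>

	#define PAGE_OFFSET 0xc0000000UL /* assumed kernel base, for illustration */

	int main(void)
	{
		/* Each segment-table entry maps a 4MB (1 << 22) region, so the
		 * index for a virtual address is simply va >> 22, the same
		 * arithmetic as "segtable + (PAGE_OFFSET >> 22)" above. */
		unsigned long va = PAGE_OFFSET + (5UL << 22); /* 5th 4MB segment */

		printf("base index = %lu\n", PAGE_OFFSET >> 22); /* 768 */
		printf("va index   = %lu\n", va >> 22);          /* 773 */
		return 0;
	}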
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
new file mode 100644
index 000000000..255c5b1ee
--- /dev/null
+++ b/arch/hexagon/mm/ioremap.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I/O remap functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr, addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ struct vm_struct *area;
+
+ pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
+ |(__HEXAGON_C_DEV << 6));
+
+ last_addr = phys_addr + size - 1;
+
+ /* Wrapping not allowed */
+ if (!size || (last_addr < phys_addr))
+ return NULL;
+
+ /* Rounds up to next page size, including whole-page offset */
+ size = PAGE_ALIGN(offset + size);
+
+ area = get_vm_area(size, VM_IOREMAP);
+ addr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + addr);
+}
+
+void iounmap(const volatile void __iomem *addr)
+{
+ vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
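
For context, a hedged usage sketch of the ioremap()/iounmap() pair above, in
ordinary driver style (the device base, size, and register offset are all
hypothetical):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	#define DEMO_PHYS 0xa0000000	/* hypothetical device window */
	#define DEMO_SIZE 0x1000

	static int demo_read_reg(u32 *val)
	{
		void __iomem *regs = ioremap(DEMO_PHYS, DEMO_SIZE);

		if (!regs)
			return -ENOMEM;
		*val = readl(regs + 0x10);	/* hypothetical register at +0x10 */
		iounmap(regs);
		return 0;
	}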
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
new file mode 100644
index 000000000..650bca92f
--- /dev/null
+++ b/arch/hexagon/mm/uaccess.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Support for user memory access from kernel. This will
+ * probably be inlined for performance at some point, but
+ * for ease of debug, and to a lesser degree for code size,
+ * we implement here as subroutines.
+ */
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/pgtable.h>
+
+/*
+ * For clear_user(), exploit the previously defined copy_to_user function
+ * and the fact that we've got a handy zero page defined in kernel/head.S
+ *
+ * dczero here would be even faster.
+ */
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
+{
+ long uncleared;
+
+ while (count > PAGE_SIZE) {
+ uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
+ if (uncleared)
+ return count - (PAGE_SIZE - uncleared);
+ count -= PAGE_SIZE;
+ dest += PAGE_SIZE;
+ }
+ if (count)
+ count = raw_copy_to_user(dest, &empty_zero_page, count);
+
+ return count;
+}
+
+unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
+{
+ if (!access_ok(dest, count))
+ return count;
+ else
+ return __clear_user_hexagon(dest, count);
+}
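
Note the return convention: both functions return the number of bytes that
could not be cleared, with 0 meaning complete success. A caller-side sketch
(ubuf and len are hypothetical):

	/* Returns 0 only if every byte was cleared; any residue means a fault. */
	if (clear_user_hexagon(ubuf, len))
		return -EFAULT;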
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
new file mode 100644
index 000000000..583b08727
--- /dev/null
+++ b/arch/hexagon/mm/vm_fault.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory fault handling for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Page fault handling for the Hexagon Virtual Machine.
+ * Can also be called by a native port emulating the HVM
+ * exceptions.
+ */
+
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/extable.h>
+#include <linux/hardirq.h>
+#include <linux/perf_event.h>
+
+/*
+ * Decode of hardware exception sends us to one of several
+ * entry points. At each, we generate canonical arguments
+ * for handling by the abstract memory management code.
+ */
+#define FLT_IFETCH -1
+#define FLT_LOAD 0
+#define FLT_STORE 1
+
+
+/*
+ * Canonical page fault handler
+ */
+void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ int si_signo;
+ int si_code = SEGV_MAPERR;
+ vm_fault_t fault;
+ const struct exception_table_entry *fixup;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+
+ /*
+ * If we're in an interrupt or have no user context,
+ * then must not take the fault.
+ */
+ if (unlikely(in_interrupt() || !mm))
+ goto no_context;
+
+ local_irq_enable();
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (unlikely(!vma))
+ goto bad_area_nosemaphore;
+
+ /* Address space is OK. Now check access rights. */
+ si_code = SEGV_ACCERR;
+
+ switch (cause) {
+ case FLT_IFETCH:
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ break;
+ case FLT_LOAD:
+ if (!(vma->vm_flags & VM_READ))
+ goto bad_area;
+ break;
+ case FLT_STORE:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ }
+
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs))
+ return;
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ /* The most common case -- we are done. */
+ if (likely(!(fault & VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+
+ mmap_read_unlock(mm);
+ return;
+ }
+
+ mmap_read_unlock(mm);
+
+ /* Handle copyin/out exception cases */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_OOM) {
+ pagefault_out_of_memory();
+ return;
+ }
+
+ /* User-mode address is in the memory map, but we are
+ * unable to fix up the page fault.
+ */
+ if (fault & VM_FAULT_SIGBUS) {
+ si_signo = SIGBUS;
+ si_code = BUS_ADRERR;
+ }
+ /* Address is not in the memory map */
+ else {
+ si_signo = SIGSEGV;
+ si_code = SEGV_ACCERR;
+ }
+ force_sig_fault(si_signo, si_code, (void __user *)address);
+ return;
+
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+ if (user_mode(regs)) {
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ return;
+ }
+ /* Kernel-mode fault falls through */
+
+no_context:
+ fixup = search_exception_tables(pt_elr(regs));
+ if (fixup) {
+ pt_set_elr(regs, fixup->fixup);
+ return;
+ }
+
+ /* Things are looking very, very bad now */
+ bust_spinlocks(1);
+ printk(KERN_EMERG "Unable to handle kernel paging request at "
+ "virtual address 0x%08lx, regs %p\n", address, regs);
+ die("Bad Kernel VA", regs, SIGKILL);
+}
+
+
+void read_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_LOAD, regs);
+}
+
+void write_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_STORE, regs);
+}
+
+void execute_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_IFETCH, regs);
+}
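
The no_context path above is the consumer of the __ex_table entries emitted
by copy_from_user.S and copy_to_user.S earlier in this patch:
search_exception_tables() looks up the faulting PC, and pt_set_elr() resumes
execution at the fixup stub instead of panicking. A simplified model of that
lookup (not the real kernel structures, just the shape of the mechanism):

	#include <stddef.h>

	struct ex_entry { unsigned long insn, fixup; }; /* like __ex_table pairs */

	/* If the faulting PC is a whitelisted user-access instruction,
	 * return its entry; NULL means a genuine kernel bug. */
	static const struct ex_entry *find_fixup(const struct ex_entry *tbl,
						 size_t n, unsigned long pc)
	{
		for (size_t i = 0; i < n; i++)
			if (tbl[i].insn == pc)
				return &tbl[i];
		return NULL;
	}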
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
new file mode 100644
index 000000000..53482f2a9
--- /dev/null
+++ b/arch/hexagon/mm/vm_tlb.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hexagon Virtual Machine TLB functions
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * The Hexagon Virtual Machine conceals the real workings of
+ * the TLB, but there are one or two functions that need to
+ * be instantiated for it, differently from a native build.
+ */
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+/*
+ * Initial VM implementation has only one map active at a time, with
+ * TLB purgings on changes. So either we're nuking the current map,
+ * or it's a no-op. This operation is messy on true SMPs where other
+ * processors must be induced to flush the copies in their local TLBs,
+ * but Hexagon thread-based virtual processors share the same MMU.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context.ptbase == current->active_mm->context.ptbase)
+ __vmclrmap((void *)start, end - start);
+}
+
+/*
+ * Flush a page from the kernel virtual map - used by highmem
+ */
+void flush_tlb_one(unsigned long vaddr)
+{
+ __vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush all TLBs across all CPUs, virtual or real.
+ * A single Hexagon core has 6 thread contexts but
+ * only one TLB.
+ */
+void tlb_flush_all(void)
+{
+	/* should probably use that fixaddr end or whatever label */
+ __vmclrmap(0, 0xffff0000);
+}
+
+/*
+ * Flush TLB entries associated with a given mm_struct mapping.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Current Virtual Machine has only one map active at a time */
+ if (current->active_mm->context.ptbase == mm->context.ptbase)
+ tlb_flush_all();
+}
+
+/*
+ * Flush TLB state associated with a page of a vma.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context.ptbase == current->active_mm->context.ptbase)
+ __vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush TLB entries associated with a kernel address range.
+ * Like flush range, but without the check on the vma->vm_mm.
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ __vmclrmap((void *)start, end - start);
+}