Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/Makefile     8
-rw-r--r--  arch/arc/mm/cache.c   1218
-rw-r--r--  arch/arc/mm/dma.c      106
-rw-r--r--  arch/arc/mm/extable.c   24
-rw-r--r--  arch/arc/mm/fault.c    193
-rw-r--r--  arch/arc/mm/highmem.c   73
-rw-r--r--  arch/arc/mm/init.c     202
-rw-r--r--  arch/arc/mm/ioremap.c   64
-rw-r--r--  arch/arc/mm/mmap.c      96
-rw-r--r--  arch/arc/mm/tlb.c      761
-rw-r--r--  arch/arc/mm/tlbex.S    378
11 files changed, 3123 insertions, 0 deletions
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 0000000000..633a773369
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-y := extable.o ioremap.o dma.o fault.o init.o
+obj-y += tlb.o tlbex.o cache.o mmap.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
new file mode 100644
index 0000000000..f7e05c1466
--- /dev/null
+++ b/arch/arc/mm/cache.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC Cache Management
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_ISA_ARCV2
+#define USE_RGN_FLSH 1
+#endif
+
+static int l2_line_sz;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
+unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
+unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
+
+static struct cpuinfo_arc_cache {
+ unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page);
+
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
+
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_cache *p_slc = &slc_info;
+ struct bcr_identity ident;
+ struct bcr_generic sbcr;
+ struct bcr_clust_cfg cbcr;
+ struct bcr_volatile vol;
+ int n = 0;
+
+ READ_BCR(ARC_REG_SLC_BCR, sbcr);
+ if (sbcr.ver) {
+ struct bcr_slc_cfg slc_cfg;
+ READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
+ p_slc->sz_k = 128 << slc_cfg.sz;
+ l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+ n += scnprintf(buf + n, len - n,
+ "SLC\t\t: %uK, %uB Line%s\n",
+ p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
+ }
+
+ READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
+ if (cbcr.c) {
+ ioc_exists = 1;
+
+ /*
+ * As of today we don't support IOC and ZONE_HIGHMEM enabled
+ * simultaneously, because the IOC aperture currently covers only
+ * ZONE_NORMAL (low mem) and any DMA transactions outside this
+ * region won't be HW coherent.
+ * If we want to use both IOC and ZONE_HIGHMEM we could use bounce
+ * buffers to handle DMA transactions to HIGHMEM.
+ * It is also possible to modify the dma_direct cache ops or increase
+ * the IOC aperture size if we plan to use HIGHMEM without PAE.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+ ioc_enable = 0;
+ } else {
+ ioc_enable = 0;
+ }
+
+ READ_BCR(AUX_IDENTITY, ident);
+
+ /* HS 2.0 didn't have AUX_VOL */
+ if (ident.family > 0x51) {
+ READ_BCR(AUX_VOL, vol);
+ perip_base = vol.start << 28;
+ /* HS 3.0 has limit and strict-ordering fields */
+ if (ident.family > 0x52)
+ perip_end = (vol.limit << 28) - 1;
+ }
+
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+ return n;
+}
+
+int arc_cache_mumbojumbo(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+ struct bcr_cache ibcr, dbcr;
+ int vipt, assoc;
+ int n = 0;
+
+ READ_BCR(ARC_REG_IC_BCR, ibcr);
+ if (!ibcr.ver)
+ goto dc_chk;
+
+ if (is_isa_arcompact() && (ibcr.ver <= 3)) {
+ BUG_ON(ibcr.config != 3);
+ assoc = 2; /* Fixed to 2w set assoc */
+ } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+ assoc = 1 << ibcr.config; /* 1,2,4,8 */
+ }
+
+ p_ic->line_len = 8 << ibcr.line_len;
+ p_ic->sz_k = 1 << (ibcr.sz - 1);
+ p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
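+ /*
+ * e.g. a 32K, 2-way I$ with 8K pages gives 32/2/8 = 2 colors, so it
+ * can alias; a result of 1 color means aliasing is impossible.
+ */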
+
+ n += scnprintf(buf + n, len - n,
+ "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+ p_ic->sz_k, assoc, p_ic->line_len,
+ p_ic->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
+
+dc_chk:
+ READ_BCR(ARC_REG_DC_BCR, dbcr);
+ if (!dbcr.ver)
+ goto slc_chk;
+
+ if (is_isa_arcompact() && (dbcr.ver <= 3)) {
+ BUG_ON(dbcr.config != 2);
+ vipt = 1;
+ assoc = 4; /* Fixed to 4w set assoc */
+ p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+ } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+ vipt = 0;
+ assoc = 1 << dbcr.config; /* 1,2,4,8 */
+ p_dc->colors = 1; /* PIPT so can't VIPT alias */
+ }
+
+ p_dc->line_len = 16 << dbcr.line_len;
+ p_dc->sz_k = 1 << (dbcr.sz - 1);
+
+ n += scnprintf(buf + n, len - n,
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+ p_dc->sz_k, assoc, p_dc->line_len,
+ vipt ? "VIPT" : "PIPT",
+ p_dc->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
+slc_chk:
+ if (is_isa_arcv2())
+ n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+ return n;
+}
+
+/*
+ * Line Operation on {I,D}-Cache
+ */
+
+#define OP_INV 0x1
+#define OP_FLUSH 0x2
+#define OP_FLUSH_N_INV 0x3
+#define OP_INV_IC 0x4
+
+/*
+ * Cache Flush programming model
+ *
+ * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
+ * Programming model requires both paddr and vaddr irrespective of aliasing
+ * considerations:
+ * - vaddr in {I,D}C_IV?L
+ * - paddr in {I,D}C_PTAG
+ *
+ * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
+ * Programming model is different for aliasing vs. non-aliasing I$
+ * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
+ * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
+ *
+ * - If PAE40 is enabled, independent of aliasing considerations, the higher
+ * bits need to be written into PTAG_HI
+ */
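+
+/*
+ * For illustration, flushing one D$ line under the MMUv3 (aliasing VIPT)
+ * model takes two aux writes per line, roughly:
+ *
+ *   write_aux_reg(ARC_REG_DC_PTAG, paddr);   <- physical tag
+ *   write_aux_reg(ARC_REG_DC_FLDL, vaddr);   <- line selected by vaddr
+ *
+ * whereas the PIPT D$ of HS38x needs only the paddr write to DC_FLDL,
+ * as the loops below show.
+ */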
+
+static inline
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int aux_cmd, aux_tag;
+ int num_lines;
+
+ if (op == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
+ aux_tag = ARC_REG_IC_PTAG;
+ } else {
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ aux_tag = ARC_REG_DC_PTAG;
+ }
+
+ /* Ensure we properly floor/ceil non-line-aligned/sized requests:
+ * @paddr must be cache-line aligned and @num_lines integral.
+ * This however can be avoided for page-sized requests since:
+ * -@paddr will be cache-line aligned already (being page aligned)
+ * -@sz will be an integral multiple of line size (being page sized).
+ */
+ if (!full_page) {
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+ vaddr &= CACHE_LINE_MASK;
+ }
+ num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+ /*
+ * MMUv3, cache ops require paddr in PTAG reg
+ * if V-P const for loop, PTAG can be written once outside loop
+ */
+ if (full_page)
+ write_aux_reg(aux_tag, paddr);
+
+ /*
+ * This is technically for MMU v4, using the MMU v3 programming model
+ * Special work for HS38 aliasing I-cache configuration with PAE40
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ * Note that PTAG_HI is hoisted outside the line loop
+ */
+ if (is_pae40_enabled() && op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+
+ while (num_lines-- > 0) {
+ if (!full_page) {
+ write_aux_reg(aux_tag, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+
+ write_aux_reg(aux_cmd, vaddr);
+ vaddr += L1_CACHE_BYTES;
+ }
+}
+
+#ifndef USE_RGN_FLSH
+
+/*
+ * Line loop for configs where only paddr is needed: PIPT D$ and
+ * non-aliasing VIPT I$ of HS38x (MMUv4)
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int aux_cmd;
+ int num_lines;
+
+ if (op == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
+ } else {
+ /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ }
+
+ /* Ensure we properly floor/ceil non-line-aligned/sized requests:
+ * @paddr must be cache-line aligned and @num_lines integral.
+ * This however can be avoided for page-sized requests since:
+ * -@paddr will be cache-line aligned already (being page aligned)
+ * -@sz will be an integral multiple of line size (being page sized).
+ */
+ if (!full_page) {
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+ /*
+ * For HS38 PAE40 configuration
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ */
+ if (is_pae40_enabled()) {
+ if (op == OP_INV_IC)
+ /*
+ * Non aliasing I-cache in HS38,
+ * aliasing I-cache handled in __cache_line_loop_v3()
+ */
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ while (num_lines-- > 0) {
+ write_aux_reg(aux_cmd, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+}
+
+#else
+
+/*
+ * optimized flush operation which takes a region as opposed to iterating per line
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int s, e;
+
+ /* Only for Non aliasing I-cache in HS38 */
+ if (op == OP_INV_IC) {
+ s = ARC_REG_IC_IVIR;
+ e = ARC_REG_IC_ENDR;
+ } else {
+ s = ARC_REG_DC_STARTR;
+ e = ARC_REG_DC_ENDR;
+ }
+
+ if (!full_page) {
+ /* for any leading gap between @paddr and start of cache line */
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+
+ /*
+ * account for any trailing gap to end of cache line
+ * this is equivalent to DIV_ROUND_UP() in line ops above
+ */
+ sz += L1_CACHE_BYTES - 1;
+ }
+
+ if (is_pae40_enabled()) {
+ /* TBD: check if crossing 4TB boundary */
+ if (op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ /* ENDR needs to be set ahead of START */
+ write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
+ write_aux_reg(s, paddr);
+
+ /* caller waits on DC_CTRL.FS */
+}
+
+#endif
+
+#ifdef CONFIG_ARC_MMU_V3
+#define __cache_line_loop __cache_line_loop_v3
+#else
+#define __cache_line_loop __cache_line_loop_v4
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+#ifndef USE_RGN_FLSH
+/*
+ * This version avoids an extra read/write of DC_CTRL for flush or
+ * invalidate ops in the non-region-flush regime (such as for ARCompact)
+ */
+static inline void __before_dc_op(const int op)
+{
+ if (op == OP_FLUSH_N_INV) {
+ /* Dcache provides 2 cmd: FLUSH or INV
+ * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * So toggle INV sub-mode depending on op request and default
+ */
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
+ }
+}
+
+#else
+
+static inline void __before_dc_op(const int op)
+{
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int val = read_aux_reg(ctl);
+
+ if (op == OP_FLUSH_N_INV) {
+ val |= DC_CTRL_INV_MODE_FLUSH;
+ }
+
+ if (op != OP_INV_IC) {
+ /*
+ * Flush / Invalidate is provided by DC_CTRL.RGN_OP 0 or 1
+ * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
+ */
+ val &= ~DC_CTRL_RGN_OP_MSK;
+ if (op & OP_INV)
+ val |= DC_CTRL_RGN_OP_INV;
+ }
+ write_aux_reg(ctl, val);
+}
+
+#endif
+
+
+static inline void __after_dc_op(const int op)
+{
+ if (op & OP_FLUSH) {
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int reg;
+
+ /* flush / flush-n-inv both wait */
+ while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
+ ;
+
+ /* Switch back to default Invalidate mode */
+ if (op == OP_FLUSH_N_INV)
+ write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
+ }
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int op)
+{
+ int aux;
+
+ __before_dc_op(op);
+
+ if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDC;
+ else
+ aux = ARC_REG_DC_FLSH;
+
+ write_aux_reg(aux, 0x1);
+
+ __after_dc_op(op);
+}
+
+static inline void __dc_disable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ __dc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
+
+/*
+ * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op)
+{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ __before_dc_op(op);
+
+ __cache_line_loop(paddr, vaddr, sz, op, full_page);
+
+ __after_dc_op(op);
+
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
+#define __dc_line_op(paddr, vaddr, sz, op)
+#define __dc_line_op_k(paddr, sz, op)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+static inline void __ic_entire_inv(void)
+{
+ write_aux_reg(ARC_REG_IC_IVIC, 1);
+ read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
+}
+
+static inline void
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz)
+{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
+ local_irq_restore(flags);
+}
+
+#ifndef CONFIG_SMP
+
+#define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
+
+#else
+
+struct ic_inv_args {
+ phys_addr_t paddr, vaddr;
+ int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+ struct ic_inv_args *ic_inv = info;
+
+ __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz)
+{
+ struct ic_inv_args ic_inv = {
+ .paddr = paddr,
+ .vaddr = vaddr,
+ .sz = sz
+ };
+
+ on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
+
+#endif /* CONFIG_SMP */
+
+#else /* !CONFIG_ARC_HAS_ICACHE */
+
+#define __ic_entire_inv()
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+ /*
+ * SLC is shared between all cores and concurrent aux operations from
+ * multiple cores need to be serialized using a spinlock
+ * A concurrent operation can be silently ignored and/or the old/new
+ * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+ * below)
+ */
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
+ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+ /*
+ * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
+ * - b'000 (default) is Flush,
+ * - b'001 is Invalidate if CTRL.IM == 0
+ * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
+ */
+ ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+ /* Don't rely on default value of IM bit */
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ if (op & OP_INV)
+ ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */
+ else
+ ctrl &= ~SLC_CTRL_RGN_OP_INV;
+
+ write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+ /*
+ * Lower bits are ignored, no need to clip
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+ end = paddr + sz + l2_line_sz - 1;
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+ spin_unlock_irqrestore(&lock, flags);
+#endif
+}
+
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+ /*
+ * SLC is shared between all cores and concurrent aux operations from
+ * multiple cores need to be serialized using a spinlock
+ * A concurrent operation can be silently ignored and/or the old/new
+ * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+ * below)
+ */
+ static DEFINE_SPINLOCK(lock);
+
+ const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
+ unsigned int ctrl, cmd;
+ unsigned long flags;
+ int num_lines;
+
+ spin_lock_irqsave(&lock, flags);
+
+ ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+ /* Don't rely on default value of IM bit */
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+ cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
+
+ sz += paddr & ~SLC_LINE_MASK;
+ paddr &= SLC_LINE_MASK;
+
+ num_lines = DIV_ROUND_UP(sz, l2_line_sz);
+
+ while (num_lines-- > 0) {
+ write_aux_reg(cmd, paddr);
+ paddr += l2_line_sz;
+ }
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+ spin_unlock_irqrestore(&lock, flags);
+#endif
+}
+
+#define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
+
+noinline static void slc_entire_op(const int op)
+{
+ unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+ ctrl = read_aux_reg(r);
+
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(r, ctrl);
+
+ if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+ else
+ write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(r);
+
+ /* Important to wait for flush to complete */
+ while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ slc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of the kernel mapping after a WRITE, which is possible if:
+ * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ * -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to K-mapping, former needs flushing.
+ */
+void flush_dcache_folio(struct folio *folio)
+{
+ struct address_space *mapping;
+
+ if (!cache_is_vipt_aliasing()) {
+ clear_bit(PG_dc_clean, &folio->flags);
+ return;
+ }
+
+ /* don't handle anon pages here */
+ mapping = folio_flush_mapping(folio);
+ if (!mapping)
+ return;
+
+ /*
+ * pagecache page, file not yet mapped to userspace
+ * Make a note that K-mapping is dirty
+ */
+ if (!mapping_mapped(mapping)) {
+ clear_bit(PG_dc_clean, &folio->flags);
+ } else if (folio_mapped(folio)) {
+ /* kernel reading from page with U-mapping */
+ phys_addr_t paddr = (unsigned long)folio_address(folio);
+ unsigned long vaddr = folio_pos(folio);
+
+ /*
+ * vaddr is not actually the virtual address, but is
+ * congruent to every user mapping.
+ */
+ if (addr_not_cache_congruent(paddr, vaddr))
+ __flush_dcache_pages(paddr, vaddr,
+ folio_nr_pages(folio));
+ }
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+ return flush_dcache_folio(page_folio(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * DMA ops for systems with L1 cache only
+ * Make memory coherent with L1 cache by flushing/invalidating L1 lines
+ */
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+}
+
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH);
+}
+
+/*
+ * DMA ops for systems with both L1 and L2 caches, but without IOC
+ * Both L1 and L2 lines need to be explicitly flushed/invalidated
+ */
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+ slc_op(start, sz, OP_FLUSH_N_INV);
+}
+
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_INV);
+ slc_op(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH);
+ slc_op(start, sz, OP_FLUSH);
+}
+
+/*
+ * Exported DMA API
+ */
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_wback_inv(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_inv(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_wback(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is the API for making I/D caches consistent when modifying
+ * kernel code (loadable modules, kprobes, kgdb...).
+ * It is called on insmod, with the kernel virtual address for CODE of
+ * the module. ARC cache maintenance ops require a PHY address, thus we
+ * need to convert the vmalloc addr to a PHY addr.
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+ unsigned int tot_sz;
+
+ WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
+
+ /* Shortcut for bigger flush ranges.
+ * Here we don't care if this was kernel virtual or phy addr
+ */
+ tot_sz = kend - kstart;
+ if (tot_sz > PAGE_SIZE) {
+ flush_cache_all();
+ return;
+ }
+
+ /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+ if (likely(kstart > PAGE_OFFSET)) {
+ /*
+ * The 2nd arg despite being paddr will be used to index icache
+ * This is OK since no alternate virtual mappings will exist
+ * given the callers for this case: kprobe/kgdb in built-in
+ * kernel code only.
+ */
+ __sync_icache_dcache(kstart, kstart, kend - kstart);
+ return;
+ }
+
+ /*
+ * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+ * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+ * handling of kernel vaddr.
+ *
+ * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+ * it still needs to handle a 2-page scenario, where the range
+ * straddles 2 virtual pages, hence the need for the loop
+ */
+ while (tot_sz > 0) {
+ unsigned int off, sz;
+ unsigned long phy, pfn;
+
+ off = kstart % PAGE_SIZE;
+ pfn = vmalloc_to_pfn((void *)kstart);
+ phy = (pfn << PAGE_SHIFT) + off;
+ sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+ __sync_icache_dcache(phy, kstart, sz);
+ kstart += sz;
+ tot_sz -= sz;
+ }
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * General purpose helper to make I and D cache lines consistent.
+ * @paddr is phy addr of region
+ * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
+ * However in one instance, when called by kprobe (for a breakpt in
+ * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
+ * use a paddr to index the cache (despite VIPT). This is fine since a
+ * builtin kernel page will not have any virtual mappings.
+ * kprobe on loadable module will be kernel vaddr.
+ */
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
+{
+ __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
+ __ic_line_inv_vaddr(paddr, vaddr, len);
+}
+
+/* wrapper to compile time eliminate alignment checks in flush loop */
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
+{
+ __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
+}
+
+/*
+ * wrapper to clear out kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
+{
+ __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
+}
+
+noinline void flush_cache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ __ic_entire_inv();
+ __dc_entire_op(OP_FLUSH_N_INV);
+
+ local_irq_restore(flags);
+
+}
+
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+ flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+ unsigned long pfn)
+{
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ u_vaddr &= PAGE_MASK;
+
+ __flush_dcache_pages(paddr, u_vaddr, 1);
+
+ if (vma->vm_flags & VM_EXEC)
+ __inv_icache_pages(paddr, u_vaddr, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_cache_all();
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long u_vaddr)
+{
+ /* TBD: do we really need to clear the kernel mapping */
+ __flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
+ __flush_dcache_pages((phys_addr_t)page_address(page),
+ (phys_addr_t)page_address(page), 1);
+
+}
+
+#endif
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+ struct folio *src = page_folio(from);
+ struct folio *dst = page_folio(to);
+ void *kfrom = kmap_atomic(from);
+ void *kto = kmap_atomic(to);
+ int clean_src_k_mappings = 0;
+
+ /*
+ * If SRC page was already mapped in userspace AND its U-mapping is
+ * not congruent with K-mapping, sync the former to the physical page so
+ * that the K-mapping in the memcpy below sees the right data
+ *
+ * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+ * equally valid for SRC page as well
+ *
+ * For !VIPT cache, all of this gets compiled out as
+ * addr_not_cache_congruent() is 0
+ */
+ if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+ __flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
+ clean_src_k_mappings = 1;
+ }
+
+ copy_page(kto, kfrom);
+
+ /*
+ * Mark DST page K-mapping as dirty for a later finalization by
+ * update_mmu_cache(). The finalization could have been done here as
+ * well (given that both vaddr/paddr are available), but
+ * update_mmu_cache() already has code to do that for other non-copied
+ * user pages (e.g. read faults which wire in the pagecache page
+ * directly).
+ */
+ clear_bit(PG_dc_clean, &dst->flags);
+
+ /*
+ * if SRC was already usermapped and non-congruent to kernel mapping
+ * sync the kernel mapping back to physical page
+ */
+ if (clean_src_k_mappings) {
+ __flush_dcache_pages((unsigned long)kfrom,
+ (unsigned long)kfrom, 1);
+ } else {
+ clear_bit(PG_dc_clean, &src->flags);
+ }
+
+ kunmap_atomic(kto);
+ kunmap_atomic(kfrom);
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ clear_page(to);
+ clear_bit(PG_dc_clean, &folio->flags);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+ /* TBD: optimize this */
+ flush_cache_all();
+ return 0;
+}
+
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ * Non-Masters need not be accessing caches at that time
+ * - They are either HALT_ON_RESET and kick started much later or
+ * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ * doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ * otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ * Coherency transactions
+ */
+static noinline void __init arc_ioc_setup(void)
+{
+ unsigned int ioc_base, mem_sz;
+
+ /*
+ * If IOC was already enabled (by the bootloader) it technically needs to
+ * be reconfigured with an aperture base/size corresponding to the Linux
+ * memory map, which will certainly differ from U-Boot's. But disabling
+ * and re-enabling IOC while DMA might potentially be active is tricky
+ * business. To avoid random memory issues later, just panic here and ask
+ * the user to upgrade to a bootloader which doesn't enable IOC
+ */
+ if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
+ panic("IOC already enabled, please upgrade bootloader!\n");
+
+ if (!ioc_enable)
+ return;
+
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+ /* Flush + invalidate SLC */
+ if (read_aux_reg(ARC_REG_SLC_BCR))
+ slc_entire_op(OP_FLUSH_N_INV);
+
+ /*
+ * currently IOC Aperture covers entire DDR
+ * TBD: fix for PGU + 1GB of low mem
+ * TBD: fix for PAE
+ */
+ mem_sz = arc_get_mem_sz();
+
+ if (!is_power_of_2(mem_sz) || mem_sz < 4096)
+ panic("IOC Aperture size must be power of 2 larger than 4KB");
+
+ /*
+ * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
+ * so setting 0x11 implies 512MB, 0x12 implies 1GB...
+ */
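+ /*
+ * e.g. for mem_sz = 512MB: mem_sz >> 10 = 512K = 2^19,
+ * order_base_2(2^19) = 19, so the register is programmed with
+ * 19 - 2 = 17 = 0x11, which hardware decodes as 2^(17+2) KB = 512MB.
+ */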
+ write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
+
+ /* for now assume kernel base is start of IOC aperture */
+ ioc_base = CONFIG_LINUX_RAM_BASE;
+
+ if (ioc_base % mem_sz != 0)
+ panic("IOC Aperture start must be aligned to the size of the aperture");
+
+ write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
+
+ /* Re-enable L1 dcache */
+ __dc_enable();
+}
+
+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ * Assume SMP only, so all cores will have same cache config. A check on
+ * one core suffices for all
+ * - IOC setup / dma callbacks only need to be done once
+ */
+static noinline void __init arc_cache_init_master(void)
+{
+ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+ struct cpuinfo_arc_cache *ic = &ic_info;
+
+ if (!ic->line_len)
+ panic("cache support enabled but non-existent cache\n");
+
+ if (ic->line_len != L1_CACHE_BYTES)
+ panic("ICache line [%d] != kernel Config [%d]",
+ ic->line_len, L1_CACHE_BYTES);
+
+ /*
+ * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
+ * pair to provide vaddr/paddr respectively, just as in MMU v3
+ */
+ if (is_isa_arcv2() && ic->colors > 1)
+ _cache_line_loop_ic_fn = __cache_line_loop_v3;
+ else
+ _cache_line_loop_ic_fn = __cache_line_loop;
+ }
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
+ struct cpuinfo_arc_cache *dc = &dc_info;
+
+ if (!dc->line_len)
+ panic("cache support enabled but non-existent cache\n");
+
+ if (dc->line_len != L1_CACHE_BYTES)
+ panic("DCache line [%d] != kernel Config [%d]",
+ dc->line_len, L1_CACHE_BYTES);
+
+ /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
+ if (is_isa_arcompact()) {
+ int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+
+ if (dc->colors > 1) {
+ if (!handled)
+ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ if (CACHE_COLORS_NUM != dc->colors)
+ panic("CACHE_COLORS_NUM not optimized for config\n");
+ } else if (handled && dc->colors == 1) {
+ panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ }
+ }
+ }
+
+ /*
+ * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+ * or equal to any cache line length.
+ */
+ BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+ "SMP_CACHE_BYTES must be >= any cache line length");
+ if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+ panic("L2 Cache line [%d] > kernel Config [%d]\n",
+ l2_line_sz, SMP_CACHE_BYTES);
+
+ /* Note that SLC disable is not formally supported till HS 3.0 */
+ if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+ arc_slc_disable();
+
+ if (is_isa_arcv2() && ioc_exists)
+ arc_ioc_setup();
+
+ if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+ __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
+ __dma_cache_inv = __dma_cache_inv_slc;
+ __dma_cache_wback = __dma_cache_wback_slc;
+ } else {
+ __dma_cache_wback_inv = __dma_cache_wback_inv_l1;
+ __dma_cache_inv = __dma_cache_inv_l1;
+ __dma_cache_wback = __dma_cache_wback_l1;
+ }
+ /*
+ * In case of IOC (say IOC+SLC case), pointers above could still be set
+ * but end up not being relevant as the first function in chain is not
+ * called at all for devices using coherent DMA.
+ * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+ */
+}
+
+void __ref arc_cache_init(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
+
+ if (!cpu)
+ arc_cache_init_master();
+
+ /*
+ * In PAE regime, TLB and cache maintenance ops take wider addresses
+ * And even if PAE is not enabled in kernel, the upper 32-bits still need
+ * to be zeroed to keep the ops sane.
+ * As an optimization for more common !PAE enabled case, zero them out
+ * once at init, rather than checking/setting to 0 for every runtime op
+ */
+ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+ if (l2_line_sz) {
+ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+ }
+ }
+}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 0000000000..2a7fbbb83b
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/dma-map-ops.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops
+ * - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ * - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ /*
+ * Evict any existing L1 and/or L2 lines for the backing page
+ * in case it was used earlier as a normal "cached" page.
+ * Yeah this bit us - STAR 9000898266
+ *
+ * Although the core does call flush_cache_vmap(), it gets a kvaddr hence
+ * can't be used to efficiently flush L1 and/or L2, which need a paddr.
+ * Currently flush_cache_vmap() nukes the L1 cache completely; this
+ * will be optimized in a separate commit
+ */
+ dma_cache_wback_inv(page_to_phys(page), size);
+}
+
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ * | map == for_device | unmap == for_cpu
+ * |----------------------------------------------------------------
+ * TO_DEV | writeback writeback | none none
+ * FROM_DEV | invalidate invalidate | invalidate* invalidate*
+ * BIDIR | writeback+inv writeback+inv | invalidate invalidate
+ *
+ * [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
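+/*
+ * For illustration, on a non-IOC (non-coherent) device a streaming RX
+ * buffer (DMA_FROM_DEVICE) roughly sees:
+ *   dma_map_single()   -> arch_sync_dma_for_device() -> dma_cache_inv()
+ *     (drop stale lines before the device writes)
+ *   dma_unmap_single() -> arch_sync_dma_for_cpu()    -> dma_cache_inv()
+ *     (drop lines the CPU may have speculatively prefetched during DMA)
+ */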
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+
+ /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Plug in direct dma map ops.
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ /*
+ * IOC hardware snoops all DMA traffic keeping the caches consistent
+ * with memory - eliding need for any explicit cache maintenance of
+ * DMA buffers.
+ */
+ if (is_isa_arcv2() && ioc_enable && coherent)
+ dev->dma_coherent = true;
+
+ dev_info(dev, "use %scoherent DMA ops\n",
+ dev->dma_coherent ? "" : "non");
+}
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 0000000000..88fa3a4d49
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/export.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->ret = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 0000000000..95119a5e77
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/perf_event.h>
+#include <linux/mm_types.h>
+#include <asm/entry.h>
+#include <asm/mmu.h>
+
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
+{
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = pgd_offset(current->active_mm, address);
+ pgd_k = pgd_offset_k(address);
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (p4d_none(*p4d_k))
+ goto bad_area;
+ if (!p4d_present(*p4d))
+ set_p4d(p4d, *p4d_k);
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (pmd_none(*pmd_k))
+ goto bad_area;
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
+
+ /* XXX: create the TLB entry here */
+ return 0;
+
+bad_area:
+ return 1;
+}
+
+void do_page_fault(unsigned long address, struct pt_regs *regs)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ int sig, si_code = SEGV_MAPERR;
+ unsigned int write = 0, exec = 0, mask;
+ vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */
+ unsigned int flags; /* handle_mm_fault() input */
+
+ /*
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= VMALLOC_START && !user_mode(regs)) {
+ if (unlikely(handle_kernel_vaddr_fault(address)))
+ goto no_context;
+ else
+ return;
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm)
+ goto no_context;
+
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
+ write = 1;
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
+ exec = 1;
+
+ flags = FAULT_FLAG_DEFAULT;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (write)
+ flags |= FAULT_FLAG_WRITE;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (!vma)
+ goto bad_area_nosemaphore;
+
+ /*
+ * vm_area is good, now check permissions for this memory access
+ */
+ mask = VM_READ;
+ if (write)
+ mask = VM_WRITE;
+ if (exec)
+ mask = VM_EXEC;
+
+ if (!(vma->vm_flags & mask)) {
+ si_code = SEGV_ACCERR;
+ goto bad_area;
+ }
+
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ /*
+ * Fault retry nuances, mmap_lock already relinquished by core mm
+ */
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+ /*
+ * Major/minor page fault accounting
+ * (in case of retry we only land here once)
+ */
+ if (likely(!(fault & VM_FAULT_ERROR)))
+ /* Normal return path: fault Handled Gracefully */
+ return;
+
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_OOM) {
+ pagefault_out_of_memory();
+ return;
+ }
+
+ if (fault & VM_FAULT_SIGBUS) {
+ sig = SIGBUS;
+ si_code = BUS_ADRERR;
+ } else {
+ sig = SIGSEGV;
+ }
+
+ tsk->thread.fault_address = address;
+ force_sig_fault(sig, si_code, (void __user *)address);
+ return;
+
+no_context:
+ if (fixup_exception(regs))
+ return;
+
+ die("Oops", regs, address);
+}
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644
index 0000000000..c79912a6b1
--- /dev/null
+++ b/arch/arc/mm/highmem.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/memblock.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/pgtable.h>
+#include <asm/processor.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * HIGHMEM API:
+ *
+ * kmap() API provides sleep semantics hence referred to as "permanent maps"
+ * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
+ * for book-keeping
+ *
+ * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
+ * short-lived "temporary mappings", which historically were implemented as
+ * fixmaps (compile time addr etc). Their book-keeping is done per cpu.
+ *
+ * Both these facts combined (preemption disabled and per-cpu allocation)
+ * means the total number of concurrent fixmaps will be limited to max
+ * such allocations in a single control path. Thus KM_TYPE_NR (another
+ * historic relic) is a small'ish number which caps max percpu fixmaps
+ *
+ * ARC HIGHMEM Details
+ *
+ * - the kernel vaddr space from 0x7000_0000 to 0x8000_0000 (currently used by vmalloc/module)
+ * is now shared between vmalloc and kmap (non overlapping though)
+ *
+ * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ * CPU. So the number of CPUs sharing a single PTE page is limited.
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+ * the PGD and only works with a single page table @pkmap_page_table, hence
+ * sets the limit
+ */
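+/*
+ * For illustration, with the typical 8K page / 11:8:13 split mentioned
+ * above: one page table holds 2^8 = 256 PTEs, each mapping an 8K page,
+ * so fixmap and pkmap each cover 256 * 8K = 2M of kvaddr (1 PGDIR_SIZE),
+ * and pkmap is capped at the 256 slots of its single page table.
+ */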
+
+extern pte_t * pkmap_page_table;
+
+static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+{
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
+ pte_t *pte_k;
+
+ pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte_k)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
+ pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+ return pte_k;
+}
+
+void __init kmap_init(void)
+{
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
+
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+ alloc_kmap_pgtable(FIXMAP_BASE);
+}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 0000000000..6a71b23f13
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/initrd.h>
+#endif
+#include <linux/of_fdt.h>
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
+static unsigned long low_mem_sz;
+
+#ifdef CONFIG_HIGHMEM
+static unsigned long min_high_pfn, max_high_pfn;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
+unsigned long arch_pfn_offset;
+EXPORT_SYMBOL(arch_pfn_offset);
+#endif
+
+long __init arc_get_mem_sz(void)
+{
+ return low_mem_sz;
+}
+
+/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
+static int __init setup_mem_sz(char *str)
+{
+ low_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+ /* early console might not be setup yet - it will show up later */
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
+
+ return 0;
+}
+early_param("mem", setup_mem_sz);
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ int in_use = 0;
+
+ if (!low_mem_sz) {
+ if (base != low_mem_start)
+ panic("CONFIG_LINUX_RAM_BASE != DT memory { }");
+
+ low_mem_sz = size;
+ in_use = 1;
+ memblock_add_node(base, size, 0, MEMBLOCK_NONE);
+ } else {
+#ifdef CONFIG_HIGHMEM
+ high_mem_start = base;
+ high_mem_sz = size;
+ in_use = 1;
+ memblock_add_node(base, size, 1, MEMBLOCK_NONE);
+ memblock_reserve(base, size);
+#endif
+ }
+
+ pr_info("Memory @ %llx [%lldM] %s\n",
+ base, TO_MB(size), !in_use ? "Not used":"");
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+
+ setup_initial_init_mm(_text, _etext, _edata, _end);
+
+ /* first page of system - kernel .vector starts here */
+ min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
+
+ /* Last usable page of low mem */
+ max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
+
+ /*------------- bootmem allocator setup -----------------------*/
+
+ /*
+ * seed the bootmem allocator after any DT memory node parsing or
+ * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
+ *
+ * Only low mem is added, otherwise we have crashes when allocating
+ * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
+ * avail memory, ending in highmem with a > 32-bit address. However
+ * it then tries to memset it with a truncated 32-bit handle, causing
+ * the crash
+ */
+
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+ initrd_start = (unsigned long)__va(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+ memblock_dump_all();
+
+ /*----------------- node/zones setup --------------------------*/
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+ * than addresses in normal aka low memory (0x8000_0000 based).
+ * Even with PAE, the huge peripheral space hole would waste a lot of
+ * mem with single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled the memory map corresponding
+ * to the hole is freed and ARC specific version of pfn_valid()
+ * handles the hole in the memory map.
+ */
+
+ min_high_pfn = PFN_DOWN(high_mem_start);
+ max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+
+ high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+
+ arch_pfn_offset = min(min_low_pfn, min_high_pfn);
+ kmap_init();
+
+#else /* CONFIG_HIGHMEM */
+ /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
+ max_mapnr = max_low_pfn - min_low_pfn;
+
+#endif /* CONFIG_HIGHMEM */
+
+ free_area_init(max_zone_pfn);
+}
+
+static void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ memblock_phys_free(high_mem_start, high_mem_sz);
+ for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ memblock_free_all();
+ highmem_init();
+
+ BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
+}
+
+#ifdef CONFIG_HIGHMEM
+int pfn_valid(unsigned long pfn)
+{
+ return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
+ (pfn >= min_low_pfn && pfn <= max_low_pfn);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 0000000000..b07004d532
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cache.h>
+
+static inline bool arc_uncached_addr_space(phys_addr_t paddr)
+{
+ if (is_isa_arcompact()) {
+ if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+ return true;
+ } else if (paddr >= perip_base && paddr <= perip_end) {
+ return true;
+ }
+
+ return false;
+}
+
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
+{
+ /*
+ * If the region is h/w uncached, the MMU mapping can be elided as an optimization
+ * The cast to u32 is fine as this region can only be inside 4GB
+ */
+ if (arc_uncached_addr_space(paddr))
+ return (void __iomem *)(u32)paddr;
+
+ return ioremap_prot(paddr, size,
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
+ * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses in
+ * the ARC hardware uncached region, this one still goes through the MMU as the caller
+ * might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
+ unsigned long flags)
+{
+ pgprot_t prot = __pgprot(flags);
+
+ /* force uncached */
+ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ /* weird double cast to handle phys_addr_t > 32 bits */
+ if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
+ return;
+
+ generic_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 0000000000..fce5fa2b4f
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ */
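+/*
+ * For illustration (values assumed, not ARC-specific): with SHMLBA = 0x4000
+ * and 8K pages, COLOUR_ALIGN(0x20001234, 3) rounds the hint up to 0x20004000
+ * and adds (3 << 13) & 0x3fff = 0x2000, returning 0x20006000 - so page 0 of
+ * the object would land at 0x20000000, a multiple of SHMLBA.
+ */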
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
+ struct vm_unmapped_area_info info;
+
+ /*
+ * We only need to do colour alignment if D cache aliases.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ /*
+ * We enforce the MAP_FIXED case.
+ */
+ if (flags & MAP_FIXED) {
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 0000000000..e536b2dcd4
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/bug.h>
+#include <linux/mm_types.h>
+
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+
+static struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * Caller needs to setup Index Reg (manually or via getIndex)
+ */
+static inline void __tlb_entry_erase(void)
+{
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+static void utlb_invalidate(void)
+{
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+}
+
+#ifdef CONFIG_ARC_MMU_V3
+
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
+{
+ unsigned int idx;
+
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ unsigned int idx;
+
+ /* Locate the TLB entry for this vaddr + ASID */
+ idx = tlb_entry_lkup(vaddr_n_asid);
+
+ /* No error means entry found, zero it out */
+ if (likely(!(idx & TLB_LKUP_ERR))) {
+ __tlb_entry_erase();
+ } else {
+ /* Duplicate entry error */
+ WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+ vaddr_n_asid);
+ }
+}
+
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
+{
+ unsigned int idx;
+
+ /*
+ * First verify if entry for this vaddr+ASID already exists
+ * This also sets up PD0 (vaddr, ASID..) for final commit
+ */
+ idx = tlb_entry_lkup(pd0);
+
+ /*
+	 * If not already present, get a free slot from the MMU.
+	 * Otherwise, the Probe has located the entry and set the INDEX reg to
+	 * the existing location, so the Write cmd will overwrite the existing
+	 * entry with the new PD0 and PD1
+ */
+ if (likely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+ /* setup the other half of TLB entry (pfn, rwx..) */
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+ /*
+ * Commit the Entry to MMU
+ * It doesn't sound safe to use the TLBWriteNI cmd here
+ * which doesn't flush uTLBs. I'd rather be safe than sorry.
+ */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+#else /* MMUv4 */
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
+}
+
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
+{
+ write_aux_reg(ARC_REG_TLBPD0, pd0);
+
+ if (!is_pae40_enabled()) {
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
+ } else {
+ write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
+ write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+ }
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
+}
+
+#endif
+
+/*
+ * Unconditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned long flags;
+ unsigned int entry;
+ int num_tlb = mmu->sets * mmu->ways;
+
+ local_irq_save(flags);
+
+ /* Load PD0 and PD1 with template for a Blank Entry */
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+
+ for (entry = 0; entry < num_tlb; entry++) {
+ /* write this entry to the TLB */
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
+ }
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ const int stlb_idx = 0x800;
+
+ /* Blank sTLB entry */
+ write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
+
+ for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
+ }
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /*
+	 * Small optimisation courtesy IA64:
+	 * flush_mm is called during fork, exit, munmap etc., multiple times as well.
+	 * Only for fork() do we need to move the parent to a new MMU ctxt;
+	 * all other cases are NOPs, hence this check.
+ */
+ if (atomic_read(&mm->mm_users) == 0)
+ return;
+
+ /*
+ * - Move to a new ASID, but only if the mm is still wired in
+	 *   (Android Binder ended up calling this for vma->vm_mm != tsk->mm,
+	 *    causing the h/w and s/w ASID to get out of sync)
+	 * - Also, the new get_new_mmu_context() implementation allocates an ASID
+	 *   only if one is not already allocated - hence unallocate first
+ */
+ destroy_context(mm);
+ if (current->mm == mm)
+ get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive.
+ * The difference between this and the kernel range flush is:
+ * - here the fastest way (if the range is too large) is to move to the next
+ *   ASID without doing any explicit shootdown
+ * - for a kernel flush, each entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+	/* If the range @start to @end is more than 32 TLB entries deep,
+	 * it's better to move to a new ASID rather than searching for
+	 * individual entries and then shooting them down
+	 *
+	 * The calculation above is rough; it doesn't account for unaligned
+	 * parts, since this is heuristic based anyway
+ */
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
+ /*
+ * @start moved to page start: this alone suffices for checking
+	 * the loop end condition below, w/o needing to align @end to a page boundary;
+ * e.g. 2000 to 4001 will anyhow loop twice
+ */
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+ while (start < end) {
+ tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
+ start += PAGE_SIZE;
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ * @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start, @end alone (interpreted as user vaddr), although technically the
+ * SASID is also needed. However our smart TLBProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+	/* exactly the same as above, except the TLB entry does not take an ASID */
+
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ while (start < end) {
+ tlb_entry_erase(start);
+ start += PAGE_SIZE;
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Delete the TLB entry in the MMU for a given page (user virtual address)
+ * NOTE: one TLB entry contains the translation for a single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ /* Note that it is critical that interrupts are DISABLED between
+	 * checking the ASID and using it to flush the TLB entry
+ */
+ local_irq_save(flags);
+
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
+ }
+
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+ struct vm_area_struct *ta_vma;
+ unsigned long ta_start;
+ unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ipi_flush_pmd_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+#endif
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+ mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = uaddr
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
+}
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
+/*
+ * Routine to create a TLB entry
+ */
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+{
+ unsigned long flags;
+ unsigned int asid_or_sasid, rwx;
+ unsigned long pd0;
+ phys_addr_t pd1;
+
+ /*
+ * create_tlb() assumes that current->mm == vma->mm, since
+	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
+ * -completes the lazy write to SASID reg (again valid for curr tsk)
+ *
+ * Removing the assumption involves
+	 * -Using vma->vm_mm->context {ASID, SASID}, as opposed to the MMU reg.
+ * -More importantly it makes this handler inconsistent with fast-path
+ * TLB Refill handler which always deals with "current"
+ *
+	 * Let's see the use cases when current->mm != vma->vm_mm and we land here
+ * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+ * Here VM wants to pre-install a TLB entry for user stack while
+ * current->mm still points to pre-execve mm (hence the condition).
+ * However the stack vaddr is soon relocated (randomization) and
+ * move_page_tables() tries to undo that TLB entry.
+ * Thus not creating TLB entry is not any worse.
+ *
+ * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+ * breakpoint in debugged task. Not creating a TLB now is not
+ * performance critical.
+ *
+	 * Neither of the cases above justifies the code churn.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ vaddr &= PAGE_MASK;
+
+	/* update this PTE's credentials */
+ pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+ /* Create HW TLB(PD0,PD1) from PTE */
+
+ /* ASID for this task */
+ asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+
+ /*
+ * ARC MMU provides fully orthogonal access bits for K/U mode,
+ * however Linux only saves 1 set to save PTE real-estate
+ * Here we convert 3 PTE bits into 6 MMU bits:
+ * -Kernel only entries have Kr Kw Kx 0 0 0
+ * -User entries have mirrored K and U bits
+ */
+ rwx = pte_val(*ptep) & PTE_BITS_RWX;
+
+ if (pte_val(*ptep) & _PAGE_GLOBAL)
+ rwx <<= 3; /* r w x => Kr Kw Kx 0 0 0 */
+ else
+ rwx |= (rwx << 3); /* r w x => Kr Kw Kx Ur Uw Ux */
+
+ pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
+
+ tlb_entry_insert(pd0, pd1);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Called at the end of pagefault, for a userspace mapped page
+ * -pre-install the corresponding TLB entry into MMU
+ * -Finalize the delayed D-cache flush of kernel mapping of page due to
+ * flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
+ */
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
+{
+ unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
+ struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+ create_tlb(vma, vaddr, ptep);
+
+	if (page == ZERO_PAGE(0))
+		return;
+
+ /*
+ * Exec page : Independent of aliasing/page-color considerations,
+ * since icache doesn't snoop dcache on ARC, any dirty
+ * K-mapping of a code page needs to be wback+inv so that
+ * icache fetch by userspace sees code correctly.
+ * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+ * so userspace sees the right data.
+ * (Avoids the flush for Non-exec + congruent mapping case)
+ */
+ if ((vma->vm_flags & VM_EXEC) ||
+ addr_not_cache_congruent(paddr, vaddr)) {
+ struct folio *folio = page_folio(page);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ if (dirty) {
+ unsigned long offset = offset_in_folio(folio, paddr);
+ nr = folio_nr_pages(folio);
+ paddr -= offset;
+ vaddr -= offset;
+ /* wback + inv dcache lines (K-mapping) */
+ __flush_dcache_pages(paddr, paddr, nr);
+
+ /* invalidate any existing icache lines (U-mapping) */
+ if (vma->vm_flags & VM_EXEC)
+ __inv_icache_pages(paddr, vaddr, nr);
+ }
+ }
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/*
+ * MMUv4 in HS38x cores supports Super Pages, which are the basis for Linux THP
+ * support.
+ *
+ * Normal and Super pages can co-exist (of course not overlap) in the TLB, with
+ * a new bit "SZ" in the TLB page descriptor to distinguish between them.
+ * Super Page size is configurable in hardware (4K to 16M), but fixed once the
+ * RTL is built.
+ *
+ * The exact THP size a Linux configuration will support is a function of:
+ *  - MMU page size (typically 8K, RTL fixed)
+ *  - software page walker address split between PGD:PTE:PFN (typically
+ *    11:8:13, but can be changed with 1 line)
+ * So for the above defaults, the supported THP size is 8K * (2^8) = 2M
+ *
+ * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
+ * reduces to 1 level (as PTE is folded into PGD and canonically referred
+ * to as PMD).
+ * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
+ */
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+{
+ pte_t pte = __pte(pmd_val(*pmd));
+ update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
+}
+
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned int cpu;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
+ unsigned int asid = hw_pid(vma->vm_mm, cpu);
+
+ /* No need to loop here: this will always be for 1 Huge Page */
+ tlb_entry_erase(start | _PAGE_HW_SZ | asid);
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif
+
+/* Read the MMU Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No validation is done here, simply read/convert the BCRs
+ */
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned int bcr, u_dtlb, u_itlb, sasid;
+ struct bcr_mmu_3 *mmu3;
+ struct bcr_mmu_4 *mmu4;
+ char super_pg[64] = "";
+ int n = 0;
+
+ bcr = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (bcr >> 24);
+
+ if (is_isa_arcompact() && mmu->ver == 3) {
+ mmu3 = (struct bcr_mmu_3 *)&bcr;
+ mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ u_dtlb = mmu3->u_dtlb;
+ u_itlb = mmu3->u_itlb;
+ sasid = mmu3->sasid;
+ } else {
+ mmu4 = (struct bcr_mmu_4 *)&bcr;
+ mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
+ mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
+ mmu->sets = 64 << mmu4->n_entry;
+ mmu->ways = mmu4->n_ways * 2;
+ u_dtlb = mmu4->u_dtlb * 4;
+ u_itlb = mmu4->u_itlb * 4;
+ sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
+ }
+
+ if (mmu->s_pg_sz_m)
+ scnprintf(super_pg, 64, "/%dM%s",
+ mmu->s_pg_sz_m,
+ IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
+
+ n += scnprintf(buf + n, len - n,
+ "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+ mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+ mmu->sets, mmu->ways,
+ u_dtlb, u_itlb,
+ IS_AVAIL1(sasid, ", SASID"),
+ IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+
+ return n;
+}
+
+int pae40_exist_but_not_enab(void)
+{
+ return mmuinfo.pae && !is_pae40_enabled();
+}
+
+void arc_mmu_init(void)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ int compat = 0;
+
+ /*
+ * Can't be done in processor.h due to header include dependencies
+ */
+ BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
+
+ /*
+ * stack top size sanity check,
+ * Can't be done in processor.h due to header include dependencies
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
+
+ /*
+ * Ensure that MMU features assumed by kernel exist in hardware.
+ * - For older ARC700 cpus, only v3 supported
+ * - For HS cpus, v4 was baseline and v5 is backwards compatible
+ * (will run older software).
+ */
+ if (is_isa_arcompact() && mmu->ver == 3)
+ compat = 1;
+ else if (is_isa_arcv2() && mmu->ver >= 4)
+ compat = 1;
+
+ if (!compat)
+ panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);
+
+ if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
+ panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
+ panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
+ (unsigned long)TO_MB(HPAGE_PMD_SIZE));
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+ panic("Hardware doesn't support PAE40\n");
+
+ /* Enable the MMU with ASID 0 */
+ mmu_setup_asid(NULL, 0);
+
+ /* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
+ mmu_setup_pgd(NULL, swapper_pg_dir);
+
+ if (pae40_exist_but_not_enab())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ * --------------------- -----------
+ * |way0|way1|way2|way3| |way0|way1|
+ * --------------------- -----------
+ * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
+ * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
+ * ~ ~ ~ ~
+ * [set127] | 508| 509| 510| 511| | 254| 255|
+ * --------------------- -----------
+ * For normal operations we don't (must not) care how the above works, since
+ * the MMU cmd getIndex(vaddr) abstracts that out.
+ * However, for walking the WAYS of a SET, we need to know this
+ */
+#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -The MMU complains not at the time of duplicate PD installation, but at the
+ *  time of a lookup matching multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ *  the duplicate one.
+ * -Knob to be verbose about it. (TODO: hook this up to debugfs)
+ */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned long flags;
+ int set, n_ways = mmu->ways;
+
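+	/* pd0[] below holds one PD0 per way and is sized for at most 4 ways */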
+ n_ways = min(n_ways, 4);
+ BUG_ON(mmu->ways > 4);
+
+ local_irq_save(flags);
+
+	/* loop through all sets of the TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+ int is_valid, way;
+ unsigned int pd0[4];
+
+ /* read out all the ways of current set */
+ for (way = 0, is_valid = 0; way < n_ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+ pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+ is_valid |= pd0[way] & _PAGE_PRESENT;
+ pd0[way] &= PAGE_MASK;
+ }
+
+ /* If all the WAYS in SET are empty, skip to next SET */
+ if (!is_valid)
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+ for (way = 0; way < n_ways - 1; way++) {
+
+ int n;
+
+ if (!pd0[way])
+ continue;
+
+ for (n = way + 1; n < n_ways; n++) {
+ if (pd0[way] != pd0[n])
+ continue;
+
+ if (!dup_pd_silent)
+ pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+ pd0[way], set, way, n);
+
+ /*
+ * clear entry @way and not @n.
+ * This is critical to our optimised loop
+ */
+ pd0[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ __tlb_entry_erase();
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 0000000000..e054780a8f
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: April 2011 :
+ * -MMU v1: moved out legacy code into a separate file
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *  entry, so that it doesn't knock out its I-TLB entry
+ * -Some more fine tuning:
+ * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ * -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ *   Hence leaner by 1.5K
+ * Used Conditional arithmetic to replace excessive branching
+ * Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ * more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ * -Added Debug Code to check if sw-ASID == hw-ASID
+ *
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/entry.h>
+#include <asm/mmu.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free up the FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; need to save only a handful of regs, as opposed to the complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it would not be SMP safe.
+; Thus the scratch AUX reg is used (and no longer used to cache the task PGD).
+; To save the remaining 3 regs per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+;--------------------------------------------------------------------------
+
+; scratch memory to save [r0-r3] used to code TLB refill Handler
+ARCFP_DATA ex_saved_reg1
+ .align 1 << L1_CACHE_SHIFT
+ .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+ .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+ .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+ .size ex_saved_reg1, 16
+ex_saved_reg1:
+ .zero 16
+#endif
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+ asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+#else
+ st r0, [@ex_saved_reg1]
+ mov_s r0, @ex_saved_reg1
+#endif
+ st_s r1, [r0, 4]
+ st_s r2, [r0, 8]
+ st_s r3, [r0, 12]
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+ asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ lr r0, [ARC_REG_SCRATCH_DATA0]
+#else
+ mov_s r0, @ex_saved_reg1
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ ld_s r0, [r0]
+#endif
+.endm
+
+#else /* ARCv2 */
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ std r0, [sp, -16]
+ std r2, [sp, -8]
+#else
+ PUSH r0
+ PUSH r1
+ PUSH r2
+ PUSH r3
+#endif
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ ldd r0, [sp, -16]
+ ldd r2, [sp, -8]
+#else
+ POP r3
+ POP r2
+ POP r1
+ POP r0
+#endif
+.endm
+
+#endif
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
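+; If the PMD/PUD shifts are not defined here (folded page-table levels), alias
+; them to the next level up so the walk in LOAD_FAULT_PTE indexes correctly.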
+#ifndef PMD_SHIFT
+#define PMD_SHIFT PUD_SHIFT
+#endif
+
+#ifndef PUD_SHIFT
+#define PUD_SHIFT PGDIR_SHIFT
+#endif
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+ lr r2, [efa]
+
+#ifdef CONFIG_ISA_ARCV2
+ lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+ GET_CURR_TASK_ON_CPU r1
+ ld r1, [r1, TASK_ACT_MM]
+ ld r1, [r1, MM_PGD]
+#endif
+
+ lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
+ ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
+ tst r3, r3
+ bz do_slow_path_pf ; if no Page Table, do page fault
+
+#if CONFIG_PGTABLE_LEVELS > 3
+ lsr r0, r2, PUD_SHIFT ; Bits for indexing into PUD
+ and r0, r0, (PTRS_PER_PUD - 1)
+	ld.as	r1, [r3, r0]		; PUD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+ lsr r0, r2, PMD_SHIFT ; Bits for indexing into PMD
+ and r0, r0, (PTRS_PER_PMD - 1)
+ ld.as r1, [r3, r0] ; PMD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
+ add2.nz r1, r1, r0
+ bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
+ mov.nz r0, r3
+
+#endif
+ and r1, r3, PAGE_MASK
+
+ ; Get the PTE entry: The idea is
+ ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
+ ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+ ; (3) z = (pgtbl + y * 4)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
+#else
+#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
+#endif
+
+	; the multiply in step (3) above is avoided by shifting less in step (1)
+ lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
+ and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
+ ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
+ ; r1: PTE ptr
+
+2:
+
+.endm
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; (for PAE40: a two-word PTE, and a three-word TLB entry [PD0:PD1:PD1HI])
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+ and r3, r0, PTE_BITS_RWX ; r w x
+ asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
+ and.f 0, r0, _PAGE_GLOBAL
+ or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
+
+ and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
+ or r3, r3, r2
+
+ sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
+#ifdef CONFIG_ARC_HAS_PAE40
+ ld r3, [r1, 4] ; paddr[39..32]
+ sr r3, [ARC_REG_TLBPD1HI]
+#endif
+
+ and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+
+ lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
+
+ or r3, r3, r2 ; S | vaddr | {sasid|asid}
+ sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+#ifdef CONFIG_ARC_MMU_V3
+
+ /* Get free TLB slot: Set = computed from vaddr, way = random */
+ sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+ /* Commit the Write */
+ sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+
+#else
+ sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
+#endif
+
+88:
+.endm
+
+
+ARCFP_CODE ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ENTRY(EV_TLBMissI)
+
+ TLBMISS_FREEUP_REGS
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Check if PTE permissions approp for executing code
+ cmp_s r2, VMALLOC_START
+ mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+ or.hs r2, r2, _PAGE_GLOBAL
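+	; (accesses at or above VMALLOC_START are kernel/global mappings, so the
+	;  PTE must additionally have the GLOBAL bit set)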
+
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
+ bnz do_slow_path_pf
+
+ ; Let Linux VM know that the page was accessed
+ or r0, r0, _PAGE_ACCESSED ; set Accessed Bit
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+EV_TLBMissI_fast_ret: ; additional label for VDK OS-kit instrumentation
+ rtie
+
+END(EV_TLBMissI)
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ENTRY(EV_TLBMissD)
+
+ TLBMISS_FREEUP_REGS
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+	; If the PTE exists, it will set up r0 = PTE, r1 = ptr to PTE, r2 = EFA
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+ cmp_s r2, VMALLOC_START
+ mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE
+ or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only
+
+ ; Linux PTE [RWX] bits are semantically overloaded:
+ ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+ ; -Otherwise they are user-mode permissions, and those are exactly
+	;   the same for kernel mode as well (e.g. copy_(to|from)_user)
+
+ lr r3, [ecr]
+ btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
+ or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
+ or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ ; Above laddering takes care of XCHG access (both R and W)
+
+ ; By now, r2 setup with all the Flags we need to check in PTE
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+ ;----------------------------------------------------------------
+ ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+ or r0, r0, _PAGE_ACCESSED ; Accessed bit always
+ or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation
+ rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Set Z flag if exception in U mode. Hardware micro-ops do this on any
+	; taken interrupt/exception, and thus it is already the case at the entry
+	; above, but the ensuing code would have clobbered it by now.
+	; EXCEPTION_PROLOGUE, called in the slow path, relies on the Z flag being set
+
+ lr r2, [erstatus]
+ and r2, r2, STATUS_U_MASK
+ bxor.f 0, r2, STATUS_U_BIT
+#endif
+
+	; Restore the 4 scratch regs saved by the fast path miss handler
+ TLBMISS_RESTORE_REGS
+
+ ; Slow path TLB Miss handled as a regular ARC Exception
+ ; (stack switching / save the complete reg-file).
+ b call_do_page_fault
+END(EV_TLBMissD)