author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/csky/mm')
 arch/csky/mm/Makefile      |  19
 arch/csky/mm/asid.c        | 189
 arch/csky/mm/cachev1.c     | 136
 arch/csky/mm/cachev2.c     | 120
 arch/csky/mm/context.c     |  46
 arch/csky/mm/dma-mapping.c |  88
 arch/csky/mm/fault.c       | 218
 arch/csky/mm/highmem.c     | 109
 arch/csky/mm/init.c        | 194
 arch/csky/mm/ioremap.c     |  19
 arch/csky/mm/syscache.c    |  32
 arch/csky/mm/tcm.c         | 169
 arch/csky/mm/tlb.c         | 170
13 files changed, 1509 insertions(+), 0 deletions(-)
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
new file mode 100644
index 000000000..6e7696e55
--- /dev/null
+++ b/arch/csky/mm/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
+obj-y += cachev2.o
+CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
+else
+obj-y += cachev1.o
+CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
+endif
+
+obj-y += dma-mapping.o
+obj-y += fault.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y += init.o
+obj-y += ioremap.o
+obj-y += syscache.o
+obj-y += tlb.o
+obj-y += asid.o
+obj-y += context.o
+obj-$(CONFIG_HAVE_TCM) += tcm.o
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
new file mode 100644
index 000000000..b2e914745
--- /dev/null
+++ b/arch/csky/mm/asid.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic ASID allocator.
+ *
+ * Based on arch/arm/mm/context.c
+ *
+ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include <asm/asid.h>
+
+#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
+
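+/*
+ * An allocated context ID packs the rollover generation in the high
+ * bits and the hardware ASID in the low (info)->bits bits: with
+ * bits = 8, for example, 0x317 means generation 3, hardware ASID 0x17.
+ */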
+#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
+#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))
+
+#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
+#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
+
+static void flush_context(struct asid_info *info)
+{
+ int i;
+ u64 asid;
+
+ /* Update the list of reserved ASIDs and the ASID bitmap. */
+ bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+
+ for_each_possible_cpu(i) {
+ asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = reserved_asid(info, i);
+ __set_bit(asid2idx(info, asid), info->map);
+ reserved_asid(info, i) = asid;
+ }
+
+ /*
+ * Queue a TLB invalidation for each CPU to perform on next
+ * context-switch
+ */
+ cpumask_setall(&info->flush_pending);
+}
+
+static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
+ u64 newasid)
+{
+ int cpu;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (reserved_asid(info, cpu) == asid) {
+ hit = true;
+ reserved_asid(info, cpu) = newasid;
+ }
+ }
+
+ return hit;
+}
+
+static u64 new_context(struct asid_info *info, atomic64_t *pasid,
+ struct mm_struct *mm)
+{
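+	/*
+	 * cur_idx remembers where the previous bitmap search ended, so
+	 * later allocations do not rescan the low indices every time.
+	 */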
+ static u32 cur_idx = 1;
+ u64 asid = atomic64_read(pasid);
+ u64 generation = atomic64_read(&info->generation);
+
+ if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK(info));
+
+ /*
+ * If our current ASID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
+ */
+ if (check_update_reserved_asid(info, asid, newasid))
+ return newasid;
+
+ /*
+ * We had a valid ASID in a previous life, so try to re-use
+ * it if possible.
+ */
+ if (!__test_and_set_bit(asid2idx(info, asid), info->map))
+ return newasid;
+ }
+
+ /*
+ * Allocate a free ASID. If we can't find one, take a note of the
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
+ */
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
+ if (asid != NUM_CTXT_ASIDS(info))
+ goto set_asid;
+
+ /* We're out of ASIDs, so increment the global generation count */
+ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
+ &info->generation);
+ flush_context(info);
+
+ /* We have more ASIDs than CPUs, so this will always succeed */
+ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
+
+set_asid:
+ __set_bit(asid, info->map);
+ cur_idx = asid;
+ cpumask_clear(mm_cpumask(mm));
+ return idx2asid(info, asid) | generation;
+}
+
+/*
+ * Generate a new ASID for the context.
+ *
+ * @pasid: Pointer to the current ASID batch allocated. It will be updated
+ * with the new ASID batch.
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu, struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ raw_spin_lock_irqsave(&info->lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(pasid);
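+	/*
+	 * XOR cancels the bits the two values share, so any generation
+	 * difference above info->bits survives the shift and forces a
+	 * new allocation below.
+	 */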
+ if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
+ asid = new_context(info, pasid, mm);
+ atomic64_set(pasid, asid);
+ }
+
+ if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
+ info->flush_cpu_ctxt_cb();
+
+ atomic64_set(&active_asid(info, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of bits in an ASID (i.e. 2^bits ASIDs are available)
+ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
+ * allocated contiguously for a given context. This value should be a power of
+ * 2.
+ */
+int asid_allocator_init(struct asid_info *info,
+ u32 bits, unsigned int asid_per_ctxt,
+ void (*flush_cpu_ctxt_cb)(void))
+{
+ info->bits = bits;
+ info->ctxt_shift = ilog2(asid_per_ctxt);
+ info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is always reserved.
+ */
+ WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
+ atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
+ info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
+ sizeof(*info->map), GFP_KERNEL);
+ if (!info->map)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&info->lock);
+
+ return 0;
+}
diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c
new file mode 100644
index 000000000..5a5a9804a
--- /dev/null
+++ b/arch/csky/mm/cachev1.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/spinlock.h>
+#include <asm/cache.h>
+#include <abi/reg_ops.h>
+
+/* for L1-cache */
+#define INS_CACHE (1 << 0)
+#define DATA_CACHE (1 << 1)
+#define CACHE_INV (1 << 4)
+#define CACHE_CLR (1 << 5)
+#define CACHE_OMS (1 << 6)
+#define CACHE_ITS (1 << 7)
+#define CACHE_LICF (1 << 31)
+
+/* for L2-cache */
+#define CR22_LEVEL_SHIFT (1)
+#define CR22_SET_SHIFT (7)
+#define CR22_WAY_SHIFT (30)
+#define CR22_WAY_SHIFT_L2 (29)
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+ mtcr("cr22", i);
+ mtcr("cr17", val);
+}
+
+#define CCR2_L2E (1 << 3)
+static void cache_op_all(unsigned int value, unsigned int l2)
+{
+ mtcr("cr17", value | CACHE_CLR);
+ mb();
+
+ if (l2 && (mfcr_ccr2() & CCR2_L2E)) {
+ mtcr("cr24", value | CACHE_CLR);
+ mb();
+ }
+}
+
+static void cache_op_range(
+ unsigned int start,
+ unsigned int end,
+ unsigned int value,
+ unsigned int l2)
+{
+ unsigned long i, flags;
+ unsigned int val = value | CACHE_CLR | CACHE_OMS;
+ bool l2_sync;
+
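+	/*
+	 * Per-line maintenance only pays off for small ranges within the
+	 * linear lowmem mapping; anything else is handled by operating on
+	 * the whole cache.
+	 */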
+ if (unlikely((end - start) >= PAGE_SIZE) ||
+ unlikely(start < PAGE_OFFSET) ||
+ unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) {
+ cache_op_all(value, l2);
+ return;
+ }
+
+ if ((mfcr_ccr2() & CCR2_L2E) && l2)
+ l2_sync = 1;
+ else
+ l2_sync = 0;
+
+ spin_lock_irqsave(&cache_lock, flags);
+
+ i = start & ~(L1_CACHE_BYTES - 1);
+ for (; i < end; i += L1_CACHE_BYTES) {
+ cache_op_line(i, val);
+ if (l2_sync) {
+ mb();
+ mtcr("cr24", val);
+ }
+ }
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ mb();
+}
+
+void dcache_wb_line(unsigned long start)
+{
+ asm volatile("idly4\n":::"memory");
+ cache_op_line(start, DATA_CACHE|CACHE_CLR);
+ mb();
+}
+
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, INS_CACHE|CACHE_INV, 0);
+}
+
+void icache_inv_all(void)
+{
+ cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
+void local_icache_inv_all(void *priv)
+{
+ cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
+void dcache_wb_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
+}
+
+void dcache_wbinv_all(void)
+{
+ cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+
+void cache_wbinv_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+EXPORT_SYMBOL(cache_wbinv_range);
+
+void cache_wbinv_all(void)
+{
+ cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+
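+/*
+ * All three DMA range helpers below issue the same writeback +
+ * invalidate operation, which is a safe (if conservative) choice for
+ * every DMA direction on this cache.
+ */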
+void dma_wbinv_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
+
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
+
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+ cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
new file mode 100644
index 000000000..7a9664adc
--- /dev/null
+++ b/arch/csky/mm/cachev2.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <asm/cache.h>
+#include <asm/barrier.h>
+
+/* for L1-cache */
+#define INS_CACHE (1 << 0)
+#define DATA_CACHE (1 << 1)
+#define CACHE_INV (1 << 4)
+#define CACHE_CLR (1 << 5)
+#define CACHE_OMS (1 << 6)
+
+void local_icache_inv_all(void *priv)
+{
+ mtcr("cr17", INS_CACHE|CACHE_INV);
+ sync_is();
+}
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile("icache.iva %0\n"::"r"(i):"memory");
+ sync_is();
+}
+#else
+struct cache_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+ mtcr("cr22", i);
+ mtcr("cr17", val);
+}
+
+void local_icache_inv_range(void *priv)
+{
+ struct cache_range *param = priv;
+ unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache_lock, flags);
+
+ for (; i < param->end; i += L1_CACHE_BYTES)
+ cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
+
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ sync_is();
+}
+
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+ struct cache_range param = { start, end };
+
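+	/*
+	 * Without the icache.iva broadcast instruction the other CPUs
+	 * must be reached by IPI; cross-calls are not allowed with IRQs
+	 * disabled, so in that case only the local icache is invalidated.
+	 */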
+ if (irqs_disabled())
+ local_icache_inv_range(&param);
+ else
+ on_each_cpu(local_icache_inv_range, &param, 1);
+}
+#endif
+
+inline void dcache_wb_line(unsigned long start)
+{
+ asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
+ sync_is();
+}
+
+void dcache_wb_range(unsigned long start, unsigned long end)
+{
+ unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
+ sync_is();
+}
+
+void cache_wbinv_range(unsigned long start, unsigned long end)
+{
+ dcache_wb_range(start, end);
+ icache_inv_range(start, end);
+}
+EXPORT_SYMBOL(cache_wbinv_range);
+
+void dma_wbinv_range(unsigned long start, unsigned long end)
+{
+ unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+ sync_is();
+}
+
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile("dcache.iva %0\n"::"r"(i):"memory");
+ sync_is();
+}
+
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+ unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+ for (; i < end; i += L1_CACHE_BYTES)
+ asm volatile("dcache.cva %0\n"::"r"(i):"memory");
+ sync_is();
+}
diff --git a/arch/csky/mm/context.c b/arch/csky/mm/context.c
new file mode 100644
index 000000000..0d95bdd93
--- /dev/null
+++ b/arch/csky/mm/context.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/asid.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+
+struct asid_info asid_info;
+
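+/*
+ * Entry point from the context-switch path: delegates ASID allocation
+ * and rollover handling to the generic allocator in asid.c.
+ */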
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+ asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
+}
+
+static void asid_flush_cpu_ctxt(void)
+{
+ local_tlb_invalid_all();
+}
+
+static int asids_init(void)
+{
+ BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
+
+ if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
+ asid_flush_cpu_ctxt))
+ panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+ NUM_ASIDS(&asid_info));
+
+ asid_info.active = &active_asids;
+ asid_info.reserved = &reserved_asids;
+
+ pr_info("ASID allocator initialised with %lu entries\n",
+ NUM_CTXT_ASIDS(&asid_info));
+
+ return 0;
+}
+early_initcall(asids_init);
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
new file mode 100644
index 000000000..c3a775a7e
--- /dev/null
+++ b/arch/csky/mm/dma-mapping.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/dma-map-ops.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <asm/cache.h>
+
+static inline void cache_op(phys_addr_t paddr, size_t size,
+ void (*fn)(unsigned long start, unsigned long end))
+{
+ struct page *page = phys_to_page(paddr);
+ void *start = __va(page_to_phys(page));
+ unsigned long offset = offset_in_page(paddr);
+ size_t left = size;
+
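+	/*
+	 * Walk the buffer page by page so that highmem pages can be
+	 * temporarily mapped with kmap_atomic() before fn() runs on
+	 * their kernel virtual addresses.
+	 */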
+ do {
+ size_t len = left;
+
+ if (offset + len > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ if (PageHighMem(page)) {
+ start = kmap_atomic(page);
+
+ fn((unsigned long)start + offset,
+ (unsigned long)start + offset + len);
+
+ kunmap_atomic(start);
+ } else {
+ fn((unsigned long)start + offset,
+ (unsigned long)start + offset + len);
+ }
+ offset = 0;
+
+ page++;
+ start += PAGE_SIZE;
+ left -= len;
+ } while (left);
+}
+
+static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
+{
+ memset((void *)start, 0, end - start);
+ dma_wbinv_range(start, end);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
+}
+
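+/*
+ * For DMA_TO_DEVICE a writeback is enough; for directions where the
+ * device writes the buffer, lines are also invalidated so no dirty
+ * line is evicted over, and no stale line read after, the DMA data.
+ */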
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ cache_op(paddr, size, dma_wb_range);
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ cache_op(paddr, size, dma_wbinv_range);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ return;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ cache_op(paddr, size, dma_inv_range);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
new file mode 100644
index 000000000..081b178b4
--- /dev/null
+++ b/arch/csky/mm/fault.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/version.h>
+#include <linux/vt_kern.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/kprobes.h>
+
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->pc = fixup->nextinsn;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+ unsigned long mmu_meh)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ int si_code;
+ int fault;
+ unsigned long address = mmu_meh & PAGE_MASK;
+
+ if (kprobe_page_fault(regs, tsk->thread.trap_no))
+ return;
+
+ si_code = SEGV_MAPERR;
+
+#ifndef CONFIG_CPU_HAS_TLBI
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely(address >= VMALLOC_START) &&
+ unlikely(address <= VMALLOC_END)) {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ unsigned long pgd_base;
+
+ pgd_base = (unsigned long)__va(get_pgd());
+ pgd = (pgd_t *)pgd_base + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ pud = (pud_t *)pgd;
+ pud_k = (pud_t *)pgd_k;
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+#endif
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ mmap_read_lock(mm);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ si_code = SEGV_ACCERR;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (unlikely(!vma_is_accessible(vma)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
+ regs);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ BUG();
+ }
+ mmap_read_unlock(mm);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.trap_no = trap_no(regs);
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ return;
+ }
+
+no_context:
+ tsk->thread.trap_no = trap_no(regs);
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
+ die(regs, "Oops");
+
+out_of_memory:
+ tsk->thread.trap_no = trap_no(regs);
+
+ /*
+	 * We ran out of memory, call the OOM killer, and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ tsk->thread.trap_no = trap_no(regs);
+
+ mmap_read_unlock(mm);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+}
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
new file mode 100644
index 000000000..89c10800a
--- /dev/null
+++ b/arch/csky/mm/highmem.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/smp.h>
+#include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+void kmap_flush_tlb(unsigned long addr)
+{
+ flush_tlb_one(addr);
+}
+EXPORT_SYMBOL(kmap_flush_tlb);
+
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+ unsigned long vaddr;
+ int idx, type;
+
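+	/*
+	 * Every CPU owns a private window of KM_TYPE_NR fixmap slots, so
+	 * concurrent atomic mappings on different CPUs never collide.
+	 */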
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
+ flush_tlb_one((unsigned long)vaddr);
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+void kunmap_atomic_high(void *kvaddr)
+{
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ int idx;
+
+ if (vaddr < FIXADDR_START)
+ return;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ pte_clear(&init_mm, vaddr, kmap_pte - idx);
+ flush_tlb_one(vaddr);
+#else
+ (void) idx; /* to kill a warning */
+#endif
+ kmap_atomic_idx_pop();
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+ unsigned long vaddr;
+ int idx, type;
+
+ pagefault_disable();
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+ flush_tlb_one(vaddr);
+
+ return (void *) vaddr;
+}
+
+static void __init kmap_pages_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pud_t *pud;
+ pte_t *pte;
+
+ vaddr = PKMAP_BASE;
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ pud = (pud_t *)pgd;
+ pmd = pmd_offset(pud, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+}
+
+void __init kmap_init(void)
+{
+ unsigned long vaddr;
+
+ kmap_pages_init();
+
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
+
+ kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+}
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
new file mode 100644
index 000000000..af6271283
--- /dev/null
+++ b/arch/csky/mm/init.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/initrd.h>
+
+#include <asm/setup.h>
+#include <asm/cachectl.h>
+#include <asm/dma.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ unsigned long size;
+
+ if (initrd_start >= initrd_end) {
+ pr_err("initrd not found or empty");
+ goto disable;
+ }
+
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ pr_err("initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+
+ if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+ __pa(initrd_start), size);
+ goto disable;
+ }
+
+ memblock_reserve(__pa(initrd_start), size);
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+
+ initrd_below_start_ok = 1;
+
+ return;
+
+disable:
+ initrd_start = initrd_end = 0;
+
+ pr_err(" - disabling initrd\n");
+}
+#endif
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ max_mapnr = highend_pfn;
+#else
+ max_mapnr = max_low_pfn;
+#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif
+
+ memblock_free_all();
+
+#ifdef CONFIG_HIGHMEM
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+		/* FIXME: not sure about this reserved check */
+ if (!memblock_is_reserved(tmp << PAGE_SHIFT))
+ free_highmem_page(page);
+ }
+#endif
+ mem_init_print_info(NULL);
+}
+
+extern char __init_begin[], __init_end[];
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long) &__init_begin;
+
+ while (addr < (unsigned long) &__init_end) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages_inc();
+ addr += PAGE_SIZE;
+ }
+
+ pr_info("Freeing unused kernel memory: %dk freed\n",
+ ((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10);
+}
+
+void pgd_init(unsigned long *p)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ p[i] = __pa(invalid_pte_table);
+}
+
+void __init pre_mmu_init(void)
+{
+ /*
+	 * Set up the page table and enable TLB hardware refill
+ */
+ flush_tlb_all();
+ pgd_init((unsigned long *)swapper_pg_dir);
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
+ TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
+
+ /* Setup page mask to 4k */
+ write_mmu_pagemask(0);
+}
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
+ pgd = pgd_base + i;
+
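+	/*
+	 * Walk the pgd/pud/pmd levels across [start, end) and allocate a
+	 * pte page for each empty pmd, so that later fixmap/kmap code can
+	 * install ptes without allocating memory.
+	 */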
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
+ set_pmd(pmd, __pmd(__pa(pte)));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+}
+
+void __init fixaddr_init(void)
+{
+ unsigned long vaddr;
+
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
+}
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
new file mode 100644
index 000000000..70c8268d3
--- /dev/null
+++ b/arch/csky/mm/ioremap.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
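+	/*
+	 * Device memory (no valid pfn) is mapped uncached; RAM opened
+	 * with O_SYNC is mapped write-combining, otherwise the caller's
+	 * protections are kept.
+	 */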
+ if (!pfn_valid(pfn)) {
+ return pgprot_noncached(vma_prot);
+ } else if (file->f_flags & O_SYNC) {
+ return pgprot_writecombine(vma_prot);
+ }
+
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
new file mode 100644
index 000000000..cd847ad62
--- /dev/null
+++ b/arch/csky/mm/syscache.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/syscalls.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+
+SYSCALL_DEFINE3(cacheflush,
+ void __user *, addr,
+ unsigned long, bytes,
+ int, cache)
+{
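+	/*
+	 * BCACHE means both caches: write back the dcache, then fall
+	 * through to the icache invalidate; DCACHE stops after the
+	 * writeback.
+	 */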
+ switch (cache) {
+ case BCACHE:
+ case DCACHE:
+ dcache_wb_range((unsigned long)addr,
+ (unsigned long)addr + bytes);
+ if (cache != BCACHE)
+ break;
+ fallthrough;
+ case ICACHE:
+ flush_icache_mm_range(current->mm,
+ (unsigned long)addr,
+ (unsigned long)addr + bytes);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c
new file mode 100644
index 000000000..ddeb36328
--- /dev/null
+++ b/arch/csky/mm/tcm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/genalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
+#error "You should define ITCM_RAM_BASE"
+#endif
+
+#ifdef CONFIG_HAVE_DTCM
+#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
+#error "You should define DTCM_RAM_BASE"
+#endif
+
+#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
+#error "You should define correct DTCM_RAM_BASE"
+#endif
+#endif
+
+extern char __tcm_start, __tcm_end, __dtcm_start;
+
+static struct gen_pool *tcm_pool;
+
+static void __init tcm_mapping_init(void)
+{
+ pte_t *tcm_pte;
+ unsigned long vaddr, paddr;
+ int i;
+
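+	/*
+	 * A valid pfn here means the configured TCM base overlaps
+	 * kernel-managed RAM, which cannot work; see the panic below.
+	 */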
+ paddr = CONFIG_ITCM_RAM_BASE;
+
+ if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
+ goto panic;
+
+#ifndef CONFIG_HAVE_DTCM
+ for (i = 0; i < TCM_NR_PAGES; i++) {
+#else
+ for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
+#endif
+ vaddr = __fix_to_virt(FIX_TCM - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+
+#ifdef CONFIG_HAVE_DTCM
+ if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
+ goto panic;
+
+ paddr = CONFIG_DTCM_RAM_BASE;
+
+ for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
+ vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+#endif
+
+#ifndef CONFIG_HAVE_DTCM
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__tcm_end - &__tcm_start);
+
+ pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
+#else
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__dtcm_start - &__tcm_start);
+
+ pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
+
+ memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ &__dtcm_start, &__tcm_end - &__dtcm_start);
+
+ pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ CONFIG_DTCM_RAM_BASE);
+
+ pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
+
+#endif
+ return;
+panic:
+ panic("TCM init error");
+}
+
+void *tcm_alloc(size_t len)
+{
+ unsigned long vaddr;
+
+ if (!tcm_pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(tcm_pool, len);
+ if (!vaddr)
+ return NULL;
+
+ return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+void tcm_free(void *addr, size_t len)
+{
+ gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+static int __init tcm_setup_pool(void)
+{
+#ifndef CONFIG_HAVE_DTCM
+ u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__tcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+ + (u32) (&__tcm_end - &__tcm_start);
+#else
+ u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__dtcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+ + (u32) (&__tcm_end - &__dtcm_start);
+#endif
+ int ret;
+
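+	/* A pool with 1 << 2 = 4 byte allocation granularity on any node. */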
+ tcm_pool = gen_pool_create(2, -1);
+
+ ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
+ if (ret) {
+ pr_err("%s: gen_pool add failed!\n", __func__);
+ return ret;
+ }
+
+ pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
+ __func__, pool_size, tcm_pool_start);
+
+ return 0;
+}
+
+static int __init tcm_init(void)
+{
+ tcm_mapping_init();
+
+ tcm_setup_pool();
+
+ return 0;
+}
+arch_initcall(tcm_init);
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
new file mode 100644
index 000000000..ed1512381
--- /dev/null
+++ b/arch/csky/mm/tlb.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+
+/*
+ * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
+ * 1 VPN -> 2 PFNs
+ */
+#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)
+
+void flush_tlb_all(void)
+{
+ tlb_invalid_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+#else
+ tlb_invalid_all();
+#endif
+}
+
+/*
+ * The MMU operation registers can only invalidate TLB entries in the
+ * jTLB, so we must change the ASID field to invalidate the I-uTLB &
+ * D-uTLB.
+ */
+#ifndef CONFIG_CPU_HAS_TLBI
+#define restore_asid_inv_utlb(oldpid, newpid) \
+do { \
+ if (oldpid == newpid) \
+ write_mmu_entryhi(oldpid + 1); \
+ write_mmu_entryhi(oldpid); \
+} while (0)
+#endif
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long newpid = cpu_asid(vma->vm_mm);
+
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
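+	/*
+	 * One jTLB entry maps a pair of pages, so a single tlbi.vas per
+	 * TLB_ENTRY_SIZE (2 * PAGE_SIZE) step covers the whole range.
+	 */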
+ while (start < end) {
+ asm volatile("tlbi.vas %0"::"r"(start | newpid));
+ start += 2*PAGE_SIZE;
+ }
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | newpid);
+ start += 2*PAGE_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ while (start < end) {
+ asm volatile("tlbi.vaas %0"::"r"(start));
+ start += 2*PAGE_SIZE;
+ }
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | oldpid);
+ start += 2*PAGE_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ int newpid = cpu_asid(vma->vm_mm);
+
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.vas %0"::"r"(addr | newpid));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | newpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_one(unsigned long addr)
+{
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.vaas %0"::"r"(addr));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | oldpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_one);