Diffstat (limited to 'arch/csky/abiv1')
-rw-r--r--  arch/csky/abiv1/Makefile                      6
-rw-r--r--  arch/csky/abiv1/alignment.c                 341
-rw-r--r--  arch/csky/abiv1/bswapdi.c                    12
-rw-r--r--  arch/csky/abiv1/bswapsi.c                    12
-rw-r--r--  arch/csky/abiv1/cacheflush.c                 75
-rw-r--r--  arch/csky/abiv1/inc/abi/cacheflush.h         63
-rw-r--r--  arch/csky/abiv1/inc/abi/ckmmu.h             101
-rw-r--r--  arch/csky/abiv1/inc/abi/elf.h                26
-rw-r--r--  arch/csky/abiv1/inc/abi/entry.h             176
-rw-r--r--  arch/csky/abiv1/inc/abi/page.h               27
-rw-r--r--  arch/csky/abiv1/inc/abi/pgtable-bits.h       55
-rw-r--r--  arch/csky/abiv1/inc/abi/reg_ops.h            26
-rw-r--r--  arch/csky/abiv1/inc/abi/regdef.h             31
-rw-r--r--  arch/csky/abiv1/inc/abi/string.h             15
-rw-r--r--  arch/csky/abiv1/inc/abi/switch_context.h     16
-rw-r--r--  arch/csky/abiv1/inc/abi/vdso.h                9
-rw-r--r--  arch/csky/abiv1/mmap.c                       71
17 files changed, 1062 insertions, 0 deletions
diff --git a/arch/csky/abiv1/Makefile b/arch/csky/abiv1/Makefile
new file mode 100644
index 0000000000..a4b2ade0fc
--- /dev/null
+++ b/arch/csky/abiv1/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o
+obj-y += bswapdi.o
+obj-y += bswapsi.o
+obj-y += cacheflush.o
+obj-y += mmap.o
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
new file mode 100644
index 0000000000..b60259daed
--- /dev/null
+++ b/arch/csky/abiv1/alignment.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+static int align_kern_enable = 1;
+static int align_usr_enable = 1;
+static int align_kern_count = 0;
+static int align_usr_count = 0;
+
+static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
+{
+ return rx == 15 ? regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx);
+}
+
+static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val)
+{
+ if (rx == 15)
+ regs->lr = val;
+ else
+ *((uint32_t *)&(regs->a0) - 2 + rx) = val;
+}
+
+/*
+ * Get byte-value from addr and set it to *valp.
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int ldb_asm(uint32_t addr, uint32_t *valp)
+{
+ uint32_t val;
+ int err;
+
+ asm volatile (
+ "movi %0, 0\n"
+ "1:\n"
+ "ldb %1, (%2)\n"
+ "br 3f\n"
+ "2:\n"
+ "movi %0, 1\n"
+ "br 3f\n"
+ ".section __ex_table,\"a\"\n"
+ ".align 2\n"
+ ".long 1b, 2b\n"
+ ".previous\n"
+ "3:\n"
+ : "=&r"(err), "=r"(val)
+ : "r" (addr)
+ );
+
+ *valp = val;
+
+ return err;
+}
+
+/*
+ * Put byte-value to addr.
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int stb_asm(uint32_t addr, uint32_t val)
+{
+ int err;
+
+ asm volatile (
+ "movi %0, 0\n"
+ "1:\n"
+ "stb %1, (%2)\n"
+ "br 3f\n"
+ "2:\n"
+ "movi %0, 1\n"
+ "br 3f\n"
+ ".section __ex_table,\"a\"\n"
+ ".align 2\n"
+ ".long 1b, 2b\n"
+ ".previous\n"
+ "3:\n"
+ : "=&r"(err)
+ : "r"(val), "r" (addr)
+ );
+
+ return err;
+}
+
+/*
+ * Get half-word from [rx + imm]
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
+{
+ uint32_t byte0, byte1;
+
+ if (ldb_asm(addr, &byte0))
+ return 1;
+ addr += 1;
+ if (ldb_asm(addr, &byte1))
+ return 1;
+
+ byte0 |= byte1 << 8;
+ put_ptreg(regs, rz, byte0);
+
+ return 0;
+}
+
+/*
+ * Store half-word to [rx + imm]
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
+{
+ uint32_t byte0, byte1;
+
+ byte0 = byte1 = get_ptreg(regs, rz);
+
+ byte0 &= 0xff;
+
+ if (stb_asm(addr, byte0))
+ return 1;
+
+ addr += 1;
+ byte1 = (byte1 >> 8) & 0xff;
+ if (stb_asm(addr, byte1))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Get word from [rx + imm]
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
+{
+ uint32_t byte0, byte1, byte2, byte3;
+
+ if (ldb_asm(addr, &byte0))
+ return 1;
+
+ addr += 1;
+ if (ldb_asm(addr, &byte1))
+ return 1;
+
+ addr += 1;
+ if (ldb_asm(addr, &byte2))
+ return 1;
+
+ addr += 1;
+ if (ldb_asm(addr, &byte3))
+ return 1;
+
+ byte0 |= byte1 << 8;
+ byte0 |= byte2 << 16;
+ byte0 |= byte3 << 24;
+
+ put_ptreg(regs, rz, byte0);
+
+ return 0;
+}
+
+/*
+ * Store word to [rx + imm]
+ *
+ * Success: return 0
+ * Failure: return 1
+ */
+static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
+{
+ uint32_t byte0, byte1, byte2, byte3;
+
+ byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz);
+
+ byte0 &= 0xff;
+
+ if (stb_asm(addr, byte0))
+ return 1;
+
+ addr += 1;
+ byte1 = (byte1 >> 8) & 0xff;
+ if (stb_asm(addr, byte1))
+ return 1;
+
+ addr += 1;
+ byte2 = (byte2 >> 16) & 0xff;
+ if (stb_asm(addr, byte2))
+ return 1;
+
+ addr += 1;
+ byte3 = (byte3 >> 24) & 0xff;
+ if (stb_asm(addr, byte3))
+ return 1;
+
+ return 0;
+}
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#define OP_LDH 0xc000
+#define OP_STH 0xd000
+#define OP_LDW 0x8000
+#define OP_STW 0x9000
+
+void csky_alignment(struct pt_regs *regs)
+{
+ int ret;
+ uint16_t tmp;
+ uint32_t opcode = 0;
+ uint32_t rx = 0;
+ uint32_t rz = 0;
+ uint32_t imm = 0;
+ uint32_t addr = 0;
+
+ if (!user_mode(regs))
+ goto kernel_area;
+
+ if (!align_usr_enable) {
+ pr_err("%s user disabled.\n", __func__);
+ goto bad_area;
+ }
+
+ align_usr_count++;
+
+ ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
+ if (ret) {
+ pr_err("%s get_user failed.\n", __func__);
+ goto bad_area;
+ }
+
+ goto good_area;
+
+kernel_area:
+ if (!align_kern_enable) {
+ pr_err("%s kernel disabled.\n", __func__);
+ goto bad_area;
+ }
+
+ align_kern_count++;
+
+ tmp = *(uint16_t *)instruction_pointer(regs);
+
+good_area:
+ opcode = (uint32_t)tmp;
+
+ rx = opcode & 0xf;
+ imm = (opcode >> 4) & 0xf;
+ rz = (opcode >> 8) & 0xf;
+ opcode &= 0xf000;
+
+ if (rx == 0 || rx == 1 || rz == 0 || rz == 1)
+ goto bad_area;
+
+ switch (opcode) {
+ case OP_LDH:
+ addr = get_ptreg(regs, rx) + (imm << 1);
+ ret = ldh_c(regs, rz, addr);
+ break;
+ case OP_LDW:
+ addr = get_ptreg(regs, rx) + (imm << 2);
+ ret = ldw_c(regs, rz, addr);
+ break;
+ case OP_STH:
+ addr = get_ptreg(regs, rx) + (imm << 1);
+ ret = sth_c(regs, rz, addr);
+ break;
+ case OP_STW:
+ addr = get_ptreg(regs, rx) + (imm << 2);
+ ret = stw_c(regs, rz, addr);
+ break;
+ }
+
+ if (ret)
+ goto bad_area;
+
+ regs->pc += 2;
+
+ return;
+
+bad_area:
+ if (!user_mode(regs)) {
+ if (fixup_exception(regs))
+ return;
+
+ bust_spinlocks(1);
+ pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n",
+ __func__, opcode, rz, rx, imm, addr);
+ show_regs(regs);
+ bust_spinlocks(0);
+ make_task_dead(SIGKILL);
+ }
+
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
+}
+
+static struct ctl_table alignment_tbl[5] = {
+ {
+ .procname = "kernel_enable",
+ .data = &align_kern_enable,
+ .maxlen = sizeof(align_kern_enable),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "user_enable",
+ .data = &align_usr_enable,
+ .maxlen = sizeof(align_usr_enable),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "kernel_count",
+ .data = &align_kern_count,
+ .maxlen = sizeof(align_kern_count),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "user_count",
+ .data = &align_usr_count,
+ .maxlen = sizeof(align_usr_count),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec
+ },
+ {}
+};
+
+static int __init csky_alignment_init(void)
+{
+ register_sysctl_init("csky/csky_alignment", alignment_tbl);
+ return 0;
+}
+
+arch_initcall(csky_alignment_init);
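
The handler above decodes the 16-bit ABIv1 load/store encodings by masking off the top nibble (OP_LDH/OP_LDW/OP_STH/OP_STW) and pulling rz, imm and rx out of the remaining nibbles, then emulates the access one byte at a time. A standalone sketch of that decode, not taken from the patch (the helper name and the printed mnemonics are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the field extraction in csky_alignment(): op | rz | imm | rx, 4 bits each. */
static void decode_abiv1_memop(uint16_t insn)
{
	uint32_t rx  = insn & 0xf;		/* base register           */
	uint32_t imm = (insn >> 4) & 0xf;	/* scaled displacement     */
	uint32_t rz  = (insn >> 8) & 0xf;	/* data register           */
	uint32_t op  = insn & 0xf000;

	switch (op) {
	case 0xc000:	/* OP_LDH: displacement scaled by 2 */
		printf("ldh r%u, (r%u, %u)\n", rz, rx, imm << 1);
		break;
	case 0x8000:	/* OP_LDW: displacement scaled by 4 */
		printf("ldw r%u, (r%u, %u)\n", rz, rx, imm << 2);
		break;
	case 0xd000:	/* OP_STH */
		printf("sth r%u, (r%u, %u)\n", rz, rx, imm << 1);
		break;
	case 0x9000:	/* OP_STW */
		printf("stw r%u, (r%u, %u)\n", rz, rx, imm << 2);
		break;
	default:
		printf("not a soft-aligned load/store\n");
	}
}

/* Example: 0xc492 decodes as ldh r4, (r2, 18), which the handler emulates byte by byte. */

Note that csky_alignment() refuses r0/r1 as either operand and only advances pc by 2 once the byte-wise emulation has succeeded; any faulting byte access falls through to the bad_area path.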
diff --git a/arch/csky/abiv1/bswapdi.c b/arch/csky/abiv1/bswapdi.c
new file mode 100644
index 0000000000..f50a1d6e33
--- /dev/null
+++ b/arch/csky/abiv1/bswapdi.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+unsigned long long notrace __bswapdi2(unsigned long long u)
+{
+ return ___constant_swab64(u);
+}
+EXPORT_SYMBOL(__bswapdi2);
diff --git a/arch/csky/abiv1/bswapsi.c b/arch/csky/abiv1/bswapsi.c
new file mode 100644
index 0000000000..0f79182e8a
--- /dev/null
+++ b/arch/csky/abiv1/bswapsi.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/export.h>
+#include <linux/compiler.h>
+#include <uapi/linux/swab.h>
+
+unsigned int notrace __bswapsi2(unsigned int u)
+{
+ return ___constant_swab32(u);
+}
+EXPORT_SYMBOL(__bswapsi2);
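
__bswapsi2() and __bswapdi2() are the libgcc byte-swap helpers; the C-SKY compiler may emit calls to them instead of inline code, so the kernel provides its own copies. Both defer to the generic swab macros, which reduce to a shift-and-mask shuffle; a standalone sketch of the 32-bit case, not taken from the patch:

#include <assert.h>
#include <stdint.h>

/* What ___constant_swab32() in <uapi/linux/swab.h> boils down to. */
static uint32_t swab32_sketch(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

int main(void)
{
	assert(swab32_sketch(0x12345678) == 0x78563412);
	return 0;
}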
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
new file mode 100644
index 0000000000..171e8fb322
--- /dev/null
+++ b/arch/csky/abiv1/cacheflush.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/spinlock.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/tlbflush.h>
+
+#define PG_dcache_clean PG_arch_1
+
+void flush_dcache_folio(struct folio *folio)
+{
+ struct address_space *mapping;
+
+ if (is_zero_pfn(folio_pfn(folio)))
+ return;
+
+ mapping = folio_flush_mapping(folio);
+
+ if (mapping && !folio_mapped(folio))
+ clear_bit(PG_dcache_clean, &folio->flags);
+ else {
+ dcache_wbinv_all();
+ if (mapping)
+ icache_inv_all();
+ set_bit(PG_dcache_clean, &folio->flags);
+ }
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct folio *folio;
+
+ flush_tlb_page(vma, addr);
+
+ if (!pfn_valid(pfn))
+ return;
+
+ if (is_zero_pfn(pfn))
+ return;
+
+ folio = page_folio(pfn_to_page(pfn));
+ if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+ dcache_wbinv_all();
+
+ if (folio_flush_mapping(folio)) {
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_all();
+ }
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ dcache_wbinv_all();
+
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_all();
+}
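
Because the ABIv1 VIPT caches alias and there is no per-address flush that covers every alias, each path above falls back to dcache_wbinv_all()/icache_inv_all(), with PG_dcache_clean deferring the flush until a mapped page actually needs it. A minimal sketch of the usual caller pattern these hooks serve; the kmap/flush pairing is the standard kernel idiom, but the helper itself is hypothetical and not part of this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Sketch only: after the kernel writes into a page that user space may also
 * map, flush_dcache_page() gives the abiv1 code above the chance to either
 * clear PG_dcache_clean (defer) or write back and invalidate everything.
 */
static void fill_user_visible_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_local_page(page);

	memcpy(kaddr, src, len);
	kunmap_local(kaddr);

	flush_dcache_page(page);
}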
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
new file mode 100644
index 0000000000..908d8b0bc4
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_CACHEFLUSH_H
+#define __ABI_CSKY_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/string.h>
+#include <asm/cache.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *);
+#define flush_dcache_folio flush_dcache_folio
+
+#define flush_cache_mm(mm) dcache_wbinv_all()
+#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
+#define flush_cache_dup_mm(mm) cache_wbinv_all()
+
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+ dcache_wbinv_all();
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+ dcache_wbinv_all();
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vmaddr)
+{
+ if (PageAnon(page))
+ cache_wbinv_all();
+}
+
+/*
+ * If current_mm != vma->mm, cache_wbinv_range(start, end) would be broken,
+ * so use cache_wbinv_all() here. This should be improved in the future.
+ */
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#define flush_cache_vmap(start, end) cache_wbinv_all()
+#define flush_cache_vunmap(start, end) cache_wbinv_all()
+
+#define flush_icache_range(start, end) cache_wbinv_range(start, end)
+#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
+#define flush_icache_deferred(mm) do {} while (0);
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ cache_wbinv_all(); \
+} while (0)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h
new file mode 100644
index 0000000000..416b30c579
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/ckmmu.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CKMMUV1_H
+#define __ASM_CSKY_CKMMUV1_H
+#include <abi/reg_ops.h>
+
+static inline int read_mmu_index(void)
+{
+ return cprcr("cpcr0");
+}
+
+static inline void write_mmu_index(int value)
+{
+ cpwcr("cpcr0", value);
+}
+
+static inline int read_mmu_entrylo0(void)
+{
+ return cprcr("cpcr2") << 6;
+}
+
+static inline int read_mmu_entrylo1(void)
+{
+ return cprcr("cpcr3") << 6;
+}
+
+static inline void write_mmu_pagemask(int value)
+{
+ cpwcr("cpcr6", value);
+}
+
+static inline int read_mmu_entryhi(void)
+{
+ return cprcr("cpcr4");
+}
+
+static inline void write_mmu_entryhi(int value)
+{
+ cpwcr("cpcr4", value);
+}
+
+static inline unsigned long read_mmu_msa0(void)
+{
+ return cprcr("cpcr30");
+}
+
+static inline void write_mmu_msa0(unsigned long value)
+{
+ cpwcr("cpcr30", value);
+}
+
+static inline unsigned long read_mmu_msa1(void)
+{
+ return cprcr("cpcr31");
+}
+
+static inline void write_mmu_msa1(unsigned long value)
+{
+ cpwcr("cpcr31", value);
+}
+
+/*
+ * TLB operations.
+ */
+static inline void tlb_probe(void)
+{
+ cpwcr("cpcr8", 0x80000000);
+}
+
+static inline void tlb_read(void)
+{
+ cpwcr("cpcr8", 0x40000000);
+}
+
+static inline void tlb_invalid_all(void)
+{
+ cpwcr("cpcr8", 0x04000000);
+}
+
+
+static inline void local_tlb_invalid_all(void)
+{
+ tlb_invalid_all();
+}
+
+static inline void tlb_invalid_indexed(void)
+{
+ cpwcr("cpcr8", 0x02000000);
+}
+
+static inline void setup_pgd(pgd_t *pgd, int asid)
+{
+ cpwcr("cpcr29", __pa(pgd) | BIT(0));
+ write_mmu_entryhi(asid);
+}
+
+static inline pgd_t *get_pgd(void)
+{
+ return __va(cprcr("cpcr29") & ~BIT(0));
+}
+#endif /* __ASM_CSKY_CKMMUV1_H */
diff --git a/arch/csky/abiv1/inc/abi/elf.h b/arch/csky/abiv1/inc/abi/elf.h
new file mode 100644
index 0000000000..3058cc06b1
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/elf.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_ELF_H
+#define __ABI_CSKY_ELF_H
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+ pr_reg[0] = regs->pc; \
+ pr_reg[1] = regs->regs[9]; \
+ pr_reg[2] = regs->usp; \
+ pr_reg[3] = regs->sr; \
+ pr_reg[4] = regs->a0; \
+ pr_reg[5] = regs->a1; \
+ pr_reg[6] = regs->a2; \
+ pr_reg[7] = regs->a3; \
+ pr_reg[8] = regs->regs[0]; \
+ pr_reg[9] = regs->regs[1]; \
+ pr_reg[10] = regs->regs[2]; \
+ pr_reg[11] = regs->regs[3]; \
+ pr_reg[12] = regs->regs[4]; \
+ pr_reg[13] = regs->regs[5]; \
+ pr_reg[14] = regs->regs[6]; \
+ pr_reg[15] = regs->regs[7]; \
+ pr_reg[16] = regs->regs[8]; \
+ pr_reg[17] = regs->lr; \
+} while (0);
+#endif /* __ABI_CSKY_ELF_H */
diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
new file mode 100644
index 0000000000..b6a2109b89
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/entry.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ENTRY_H
+#define __ASM_CSKY_ENTRY_H
+
+#include <asm/setup.h>
+#include <abi/regdef.h>
+
+#define LSAVE_PC 8
+#define LSAVE_PSR 12
+#define LSAVE_A0 24
+#define LSAVE_A1 28
+#define LSAVE_A2 32
+#define LSAVE_A3 36
+#define LSAVE_A4 40
+#define LSAVE_A5 44
+
+#define usp ss1
+
+.macro USPTOKSP
+ mtcr sp, usp
+ mfcr sp, ss0
+.endm
+
+.macro KSPTOUSP
+ mtcr sp, ss0
+ mfcr sp, usp
+.endm
+
+.macro SAVE_ALL epc_inc
+ mtcr r13, ss2
+ mfcr r13, epsr
+ btsti r13, 31
+ bt 1f
+ USPTOKSP
+1:
+ subi sp, 32
+ subi sp, 32
+ subi sp, 16
+ stw r13, (sp, 12)
+
+ stw lr, (sp, 4)
+
+ mfcr lr, epc
+ movi r13, \epc_inc
+ add lr, r13
+ stw lr, (sp, 8)
+
+ mov lr, sp
+ addi lr, 32
+ addi lr, 32
+ addi lr, 16
+ bt 2f
+ mfcr lr, ss1
+2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+ stw a0, (sp, 24)
+ stw a1, (sp, 28)
+ stw a2, (sp, 32)
+ stw a3, (sp, 36)
+
+ addi sp, 32
+ addi sp, 8
+ mfcr r13, ss2
+ stw r6, (sp)
+ stw r7, (sp, 4)
+ stw r8, (sp, 8)
+ stw r9, (sp, 12)
+ stw r10, (sp, 16)
+ stw r11, (sp, 20)
+ stw r12, (sp, 24)
+ stw r13, (sp, 28)
+ stw r14, (sp, 32)
+ stw r1, (sp, 36)
+ subi sp, 32
+ subi sp, 8
+.endm
+
+.macro RESTORE_ALL
+ ldw lr, (sp, 4)
+ ldw a0, (sp, 8)
+ mtcr a0, epc
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
+ btsti a0, 31
+ bt 1f
+ ldw a0, (sp, 16)
+ mtcr a0, ss1
+1:
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+ ldw a3, (sp, 36)
+
+ addi sp, 32
+ addi sp, 8
+ ldw r6, (sp)
+ ldw r7, (sp, 4)
+ ldw r8, (sp, 8)
+ ldw r9, (sp, 12)
+ ldw r10, (sp, 16)
+ ldw r11, (sp, 20)
+ ldw r12, (sp, 24)
+ ldw r13, (sp, 28)
+ ldw r14, (sp, 32)
+ ldw r1, (sp, 36)
+ addi sp, 32
+ addi sp, 8
+
+ bt 2f
+ KSPTOUSP
+2:
+ rte
+.endm
+
+.macro SAVE_SWITCH_STACK
+ subi sp, 32
+ stm r8-r15, (sp)
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ ldm r8-r15, (sp)
+ addi sp, 32
+.endm
+
+/* MMU register operators. */
+.macro RD_MIR rx
+ cprcr \rx, cpcr0
+.endm
+
+.macro RD_MEH rx
+ cprcr \rx, cpcr4
+.endm
+
+.macro RD_MCIR rx
+ cprcr \rx, cpcr8
+.endm
+
+.macro RD_PGDR rx
+ cprcr \rx, cpcr29
+.endm
+
+.macro WR_MEH rx
+ cpwcr \rx, cpcr4
+.endm
+
+.macro WR_MCIR rx
+ cpwcr \rx, cpcr8
+.endm
+
+.macro SETUP_MMU
+ /* Init psr and enable ee */
+ lrw r6, DEFAULT_PSR_VALUE
+ mtcr r6, psr
+ psrset ee
+
+ /* Select MMU as co-processor */
+ cpseti cp15
+
+ /*
+ * cpcr30 format:
+ * 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0
+ * BA Reserved C D V
+ */
+ cprcr r6, cpcr30
+ lsri r6, 29
+ lsli r6, 29
+ addi r6, 0xe
+ cpwcr r6, cpcr30
+
+ movi r6, 0
+ cpwcr r6, cpcr31
+.endm
+#endif /* __ASM_CSKY_ENTRY_H */
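
For reading the offsets used by SAVE_ALL/RESTORE_ALL and the LSAVE_* constants: the 80-byte frame the macro carves out (subi sp, 32+32+16) corresponds to csky's pt_regs, which is defined outside this diff. The field list below is reproduced from memory and should be treated as an assumption; the byte offsets are the ones the macro itself stores to:

/* Assumed mirror of csky's pt_regs (uapi/asm/ptrace.h), ABIv1 part only. */
struct abiv1_trap_frame {			/* byte offset                       */
	unsigned long tls;			/*  0: not written by SAVE_ALL       */
	unsigned long lr;			/*  4: stw lr, (sp, 4)               */
	unsigned long pc;			/*  8: epc + epc_inc   (LSAVE_PC)    */
	unsigned long sr;			/* 12: epsr            (LSAVE_PSR)   */
	unsigned long usp;			/* 16: ss1 (user) or kernel sp       */
	unsigned long orig_a0;			/* 20: a0 stored a second time       */
	unsigned long a0, a1, a2, a3;		/* 24-36: LSAVE_A0 .. LSAVE_A3       */
	unsigned long regs[10];			/* 40-76: r6-r14 and r1              */
};

get_ptreg() in alignment.c relies on the same layout: ABIv1 r2-r5 are a0-a3, r6-r14 follow in regs[], and r15 is handled separately as the saved lr.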
diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h
new file mode 100644
index 0000000000..2d2159933b
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/page.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/shmparam.h>
+
+extern void flush_dcache_page(struct page *page);
+
+static inline unsigned long pages_do_alias(unsigned long addr1,
+ unsigned long addr2)
+{
+ return (addr1 ^ addr2) & (SHMLBA-1);
+}
+
+static inline void clear_user_page(void *addr, unsigned long vaddr,
+ struct page *page)
+{
+ clear_page(addr);
+ if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
+ flush_dcache_page(page);
+}
+
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *page)
+{
+ copy_page(to, from);
+ if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))
+ flush_dcache_page(page);
+}
diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h
new file mode 100644
index 0000000000..ae7a2f76dd
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PGTABLE_BITS_H
+#define __ASM_CSKY_PGTABLE_BITS_H
+
+/* implemented in software */
+#define _PAGE_PRESENT (1<<0)
+#define _PAGE_READ (1<<1)
+#define _PAGE_WRITE (1<<2)
+#define _PAGE_ACCESSED (1<<3)
+#define _PAGE_MODIFIED (1<<4)
+
+/* We borrow bit 9 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1<<9)
+
+/* implemented in hardware */
+#define _PAGE_GLOBAL (1<<6)
+#define _PAGE_VALID (1<<7)
+#define _PAGE_DIRTY (1<<8)
+
+#define _PAGE_CACHE (3<<9)
+#define _PAGE_UNCACHE (2<<9)
+#define _PAGE_SO _PAGE_UNCACHE
+#define _CACHE_MASK (7<<9)
+
+#define _CACHE_CACHED _PAGE_CACHE
+#define _CACHE_UNCACHED _PAGE_UNCACHE
+
+#define _PAGE_PROT_NONE _PAGE_READ
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1: _PAGE_READ (zero)
+ * bit 2 - 5: swap type[0 - 3]
+ * bit 6: _PAGE_GLOBAL (zero)
+ * bit 7: _PAGE_VALID (zero)
+ * bit 8: swap type[4]
+ * bit 9: exclusive marker
+ * bit 10 - 31: swap offset
+ */
+#define __swp_type(x) ((((x).val >> 2) & 0xf) | \
+ (((x).val >> 4) & 0x10))
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type & 0xf) << 2) | \
+ ((type & 0x10) << 4) | \
+ ((offset) << 10)})
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* __ASM_CSKY_PGTABLE_BITS_H */
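
The comment block above fixes a 5-bit swap type split across bits 2-5 and bit 8, the exclusive marker at bit 9, and the offset from bit 10 up, so that _PAGE_PRESENT, _PAGE_READ (i.e. _PAGE_PROT_NONE), _PAGE_GLOBAL and _PAGE_VALID all stay clear in a swap PTE. A standalone round-trip check of that encoding, restating the macros outside the kernel (the values are arbitrary examples, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Userspace restatement of __swp_type/__swp_offset/__swp_entry above. */
#define SWP_TYPE(v)		((((v) >> 2) & 0xf) | (((v) >> 4) & 0x10))
#define SWP_OFFSET(v)		((v) >> 10)
#define SWP_ENTRY(type, off)	((((type) & 0xf) << 2) | \
				 (((type) & 0x10) << 4) | ((off) << 10))

int main(void)
{
	uint32_t v = SWP_ENTRY(0x15u, 0x1234u);	/* type 21 exercises type[4] */

	assert(SWP_TYPE(v) == 0x15);		/* bits 2-5 plus bit 8       */
	assert(SWP_OFFSET(v) == 0x1234);	/* bits 10 and up            */
	assert((v & 0x3) == 0);			/* present/read stay clear   */
	return 0;
}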
diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h
new file mode 100644
index 0000000000..abd01a2433
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/reg_ops.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_REG_OPS_H
+#define __ABI_REG_OPS_H
+#include <asm/reg_ops.h>
+
+#define cprcr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile("cprcr %0, "reg"\n":"=b"(tmp)); \
+ tmp; \
+})
+
+#define cpwcr(reg, val) \
+({ \
+ asm volatile("cpwcr %0, "reg"\n"::"b"(val)); \
+})
+
+static inline unsigned int mfcr_hint(void)
+{
+ return mfcr("cr30");
+}
+
+static inline unsigned int mfcr_ccr2(void) { return 0; }
+
+#endif /* __ABI_REG_OPS_H */
diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h
new file mode 100644
index 0000000000..7b386fd670
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/regdef.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_REGDEF_H
+#define __ASM_CSKY_REGDEF_H
+
+#ifdef __ASSEMBLY__
+#define syscallid r1
+#else
+#define syscallid "r1"
+#endif
+
+#define regs_syscallid(regs) regs->regs[9]
+#define regs_fp(regs) regs->regs[2]
+
+/*
+ * PSR format:
+ * | 31 | 30-24 | 23-16 | 15 14 | 13-0 |
+ * S CPID VEC TM
+ *
+ * S: Super Mode
+ * CPID: Coprocessor id, only 15 for MMU
+ * VEC: Exception Number
+ * TM: Trace Mode
+ */
+#define DEFAULT_PSR_VALUE 0x8f000000
+
+#define SYSTRACE_SAVENUM 2
+
+#define TRAP0_SIZE 2
+
+#endif /* __ASM_CSKY_REGDEF_H */
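
Decoding DEFAULT_PSR_VALUE against the format sketched in the comment: 0x8f000000 sets S (supervisor mode), selects coprocessor 15 — the MMU, the only CPID the comment allows — and leaves VEC and TM at zero (the EE bit is enabled separately by SETUP_MMU's psrset ee). A small standalone check, not from the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t psr = 0x8f000000;		/* DEFAULT_PSR_VALUE */

	assert((psr >> 31) == 1);		/* S: supervisor mode         */
	assert(((psr >> 24) & 0x7f) == 15);	/* CPID: coprocessor 15 (MMU) */
	assert(((psr >> 16) & 0xff) == 0);	/* VEC: no exception vector   */
	assert(((psr >> 14) & 0x3) == 0);	/* TM: trace mode off         */
	return 0;
}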
diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h
new file mode 100644
index 0000000000..de50117b90
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/string.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_STRING_H
+#define __ABI_CSKY_STRING_H
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+#endif /* __ABI_CSKY_STRING_H */
diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h
new file mode 100644
index 0000000000..ec73fd7c9f
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/switch_context.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_PTRACE_H
+#define __ABI_CSKY_PTRACE_H
+
+struct switch_stack {
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+};
+#endif /* __ABI_CSKY_PTRACE_H */
diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h
new file mode 100644
index 0000000000..9e6d0a2fdd
--- /dev/null
+++ b/arch/csky/abiv1/inc/abi/vdso.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_VDSO_H
+#define __ABI_CSKY_VDSO_H
+
+/* movi r1, 127; addi r1, (139 - 127) */
+#define SET_SYSCALL_ID .long 0x20b167f1
+
+#endif /* __ABI_CSKY_VDSO_H */
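
The packed word is two 16-bit ABIv1 instructions. On the little-endian layout Linux/csky uses, the halfword at the lower address comes from the low 16 bits of the .long, so 0x67f1 would be the movi r1, 127 and 0x20b1 the addi r1, 12 from the comment; together they leave 139 — rt_sigreturn in the asm-generic syscall table — in the syscallid register r1 (see regdef.h), split in two because movi's immediate field tops out at 127. A quick standalone check of the split, not from the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t word = 0x20b167f1;		/* SET_SYSCALL_ID */

	assert((uint16_t)word == 0x67f1);	/* first insn:  movi r1, 127       */
	assert((word >> 16) == 0x20b1);		/* second insn: addi r1, 139 - 127 */
	assert(127 + (139 - 127) == 139);	/* syscall id left in r1           */
	return 0;
}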
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
new file mode 100644
index 0000000000..6792aca499
--- /dev/null
+++ b/arch/csky/abiv1/mmap.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/io.h>
+
+#define COLOUR_ALIGN(addr,pgoff) \
+ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
+ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+/*
+ * We need to ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches. We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ *
+ * We unconditionally provide this function for all cases.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ struct vm_unmapped_area_info info;
+
+ /*
+ * We only need to do colour alignment if either the I or D
+ * caches alias.
+ */
+ do_align = filp || (flags & MAP_SHARED);
+
+ /*
+ * We enforce the MAP_FIXED case.
+ */
+ if (flags & MAP_FIXED) {
+ if (flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
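
COLOUR_ALIGN() rounds the hint up to an SHMLBA boundary and then adds the cache colour of pgoff, so every shared mapping of a given file page lands at the same colour and the VIPT aliasing the comment warns about cannot occur. A standalone worked example, assuming 4 KiB pages and an SHMLBA of four pages (the real value comes from asm/shmparam.h):

#include <assert.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages         */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)		/* assumed; see asm/shmparam.h */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	/* 0x12345678 is pushed up to the next 16 KiB boundary (0x12348000)
	 * and then offset by the colour of pgoff 3 (0x3000).
	 */
	assert(COLOUR_ALIGN(0x12345678UL, 3) == 0x1234b000UL);

	/* Any two hints for the same pgoff end up a multiple of SHMLBA
	 * apart, i.e. at the same cache colour.
	 */
	assert((COLOUR_ALIGN(0x20000000UL, 3) -
		COLOUR_ALIGN(0x12345678UL, 3)) % SHMLBA == 0);
	return 0;
}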