Diffstat
-rw-r--r--  arch/csky/abiv2/Makefile                  |  14
-rw-r--r--  arch/csky/abiv2/cacheflush.c              |  88
-rw-r--r--  arch/csky/abiv2/fpu.c                     | 270
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h      |  55
-rw-r--r--  arch/csky/abiv2/inc/abi/ckmmu.h           | 139
-rw-r--r--  arch/csky/abiv2/inc/abi/elf.h             |  43
-rw-r--r--  arch/csky/abiv2/inc/abi/entry.h           | 314
-rw-r--r--  arch/csky/abiv2/inc/abi/fpu.h             |  66
-rw-r--r--  arch/csky/abiv2/inc/abi/page.h            |  13
-rw-r--r--  arch/csky/abiv2/inc/abi/pgtable-bits.h    |  48
-rw-r--r--  arch/csky/abiv2/inc/abi/reg_ops.h         |  16
-rw-r--r--  arch/csky/abiv2/inc/abi/regdef.h          |  31
-rw-r--r--  arch/csky/abiv2/inc/abi/string.h          |  27
-rw-r--r--  arch/csky/abiv2/inc/abi/switch_context.h  |  31
-rw-r--r--  arch/csky/abiv2/inc/abi/vdso.h            |   9
-rw-r--r--  arch/csky/abiv2/mcount.S                  | 211
-rw-r--r--  arch/csky/abiv2/memcmp.S                  | 152
-rw-r--r--  arch/csky/abiv2/memcpy.S                  | 104
-rw-r--r--  arch/csky/abiv2/memmove.S                 | 104
-rw-r--r--  arch/csky/abiv2/memset.S                  |  83
-rw-r--r--  arch/csky/abiv2/strcmp.S                  | 168
-rw-r--r--  arch/csky/abiv2/strcpy.S                  | 123
-rw-r--r--  arch/csky/abiv2/strksyms.c                |  14
-rw-r--r--  arch/csky/abiv2/strlen.S                  |  97
-rw-r--r--  arch/csky/abiv2/sysdep.h                  |  29
25 files changed, 2249 insertions(+), 0 deletions(-)
diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile
new file mode 100644
index 000000000..ea8005fe0
--- /dev/null
+++ b/arch/csky/abiv2/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += cacheflush.o
+obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
+obj-y += memcmp.o
+ifeq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS), y)
+obj-y += memcpy.o
+obj-y += memmove.o
+obj-y += memset.o
+endif
+obj-y += strcmp.o
+obj-y += strcpy.o
+obj-y += strlen.o
+obj-y += strksyms.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
new file mode 100644
index 000000000..39c51399d
--- /dev/null
+++ b/arch/csky/abiv2/cacheflush.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <asm/cache.h>
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long addr;
+ struct page *page;
+
+ if (!pfn_valid(pte_pfn(*pte)))
+ return;
+
+ page = pfn_to_page(pte_pfn(*pte));
+ if (page == ZERO_PAGE(0))
+ return;
+
+ if (test_and_set_bit(PG_dcache_clean, &page->flags))
+ return;
+
+ addr = (unsigned long) kmap_atomic(page);
+
+ dcache_wb_range(addr, addr + PAGE_SIZE);
+
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_range(addr, addr + PAGE_SIZE);
+
+ kunmap_atomic((void *) addr);
+}
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm_range.
+ */
+ smp_mb();
+ local_icache_inv_all(NULL);
+ }
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ unsigned int cpu;
+ cpumask_t others, *mask;
+
+ preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+ if (mm == current->mm) {
+ icache_inv_range(start, end);
+ preempt_enable();
+ return;
+ }
+#endif
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_icache_inv_all(NULL);
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+ if (mm != current->active_mm || !cpumask_empty(&others)) {
+ on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+ cpumask_clear(mask);
+ }
+
+ preempt_enable();
+}
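
The file above implements a lazy icache-coherence scheme: flush_icache_mm_range() marks every CPU's icache as potentially stale in mm->context.icache_stale_mask and invalidates the local one right away, while flush_icache_deferred() invalidates on a stale CPU only when the mm is next scheduled there. Below is a minimal user-space model of that bookkeeping; the IPI path via on_each_cpu_mask() and the memory barriers are omitted, and all names are illustrative stand-ins, not kernel API.

#include <assert.h>
#include <stdbool.h>

#define NR_CPUS 4

static bool icache_stale[NR_CPUS];	/* models mm->context.icache_stale_mask */
static int invalidations;		/* counts local icache invalidates */

static void local_icache_inv_all(int cpu)
{
	invalidations++;
}

/* Writer side, as in flush_icache_mm_range(): mark all CPUs stale,
 * then flush the current CPU immediately. */
static void mark_icache_stale(int this_cpu)
{
	for (int i = 0; i < NR_CPUS; i++)
		icache_stale[i] = true;
	icache_stale[this_cpu] = false;
	local_icache_inv_all(this_cpu);
}

/* Switch-in side, as in flush_icache_deferred(): invalidate only if
 * this CPU was marked stale since it last ran the mm. */
static void switch_in(int cpu)
{
	if (icache_stale[cpu]) {
		icache_stale[cpu] = false;
		local_icache_inv_all(cpu);
	}
}

int main(void)
{
	mark_icache_stale(0);	/* CPU 0 writes new code */
	switch_in(1);		/* CPU 1 picks up the mm: one invalidate */
	switch_in(1);		/* nothing stale: no extra invalidate */
	assert(invalidations == 2);
	return 0;
}
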
diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c
new file mode 100644
index 000000000..5acc5c2e5
--- /dev/null
+++ b/arch/csky/abiv2/fpu.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <abi/reg_ops.h>
+
+#define MTCR_MASK 0xFC00FFE0
+#define MFCR_MASK 0xFC00FFE0
+#define MTCR_DIST 0xC0006420
+#define MFCR_DIST 0xC0006020
+
+/*
+ * fpu_libc_helper() helps libc execute:
+ * - mfcr %a, cr<1, 2>
+ * - mfcr %a, cr<2, 2>
+ * - mtcr %a, cr<1, 2>
+ * - mtcr %a, cr<2, 2>
+ */
+int fpu_libc_helper(struct pt_regs *regs)
+{
+ int fault;
+ unsigned long instrptr, regx = 0;
+ unsigned long index = 0, tmp = 0;
+ unsigned long tinstr = 0;
+ u16 instr_hi, instr_low;
+
+ instrptr = instruction_pointer(regs);
+ if (instrptr & 1)
+ return 0;
+
+ fault = __get_user(instr_low, (u16 *)instrptr);
+ if (fault)
+ return 0;
+
+ fault = __get_user(instr_hi, (u16 *)(instrptr + 2));
+ if (fault)
+ return 0;
+
+ tinstr = instr_hi | ((unsigned long)instr_low << 16);
+
+ if (((tinstr >> 21) & 0x1F) != 2)
+ return 0;
+
+ if ((tinstr & MTCR_MASK) == MTCR_DIST) {
+ index = (tinstr >> 16) & 0x1F;
+ if (index > 13)
+ return 0;
+
+ tmp = tinstr & 0x1F;
+ if (tmp > 2)
+ return 0;
+
+ regx = *(&regs->a0 + index);
+
+ if (tmp == 1)
+ mtcr("cr<1, 2>", regx);
+ else if (tmp == 2)
+ mtcr("cr<2, 2>", regx);
+ else
+ return 0;
+
+ regs->pc += 4;
+ return 1;
+ }
+
+ if ((tinstr & MFCR_MASK) == MFCR_DIST) {
+ index = tinstr & 0x1F;
+ if (index > 13)
+ return 0;
+
+ tmp = ((tinstr >> 16) & 0x1F);
+ if (tmp > 2)
+ return 0;
+
+ if (tmp == 1)
+ regx = mfcr("cr<1, 2>");
+ else if (tmp == 2)
+ regx = mfcr("cr<2, 2>");
+ else
+ return 0;
+
+ *(&regs->a0 + index) = regx;
+
+ regs->pc += 4;
+ return 1;
+ }
+
+ return 0;
+}
+
+void fpu_fpe(struct pt_regs *regs)
+{
+ int sig, code;
+ unsigned int fesr;
+
+ fesr = mfcr("cr<2, 2>");
+
+ sig = SIGFPE;
+ code = FPE_FLTUNK;
+
+ if (fesr & FPE_ILLE) {
+ sig = SIGILL;
+ code = ILL_ILLOPC;
+ } else if (fesr & FPE_IDC) {
+ sig = SIGILL;
+ code = ILL_ILLOPN;
+ } else if (fesr & FPE_FEC) {
+ sig = SIGFPE;
+ if (fesr & FPE_IOC)
+ code = FPE_FLTINV;
+ else if (fesr & FPE_DZC)
+ code = FPE_FLTDIV;
+ else if (fesr & FPE_UFC)
+ code = FPE_FLTUND;
+ else if (fesr & FPE_OFC)
+ code = FPE_FLTOVF;
+ else if (fesr & FPE_IXC)
+ code = FPE_FLTRES;
+ }
+
+ force_sig_fault(sig, code, (void __user *)regs->pc);
+}
+
+#define FMFVR_FPU_REGS(vrx, vry) \
+ "fmfvrl %0, "#vrx"\n" \
+ "fmfvrh %1, "#vrx"\n" \
+ "fmfvrl %2, "#vry"\n" \
+ "fmfvrh %3, "#vry"\n"
+
+#define FMTVR_FPU_REGS(vrx, vry) \
+ "fmtvrl "#vrx", %0\n" \
+ "fmtvrh "#vrx", %1\n" \
+ "fmtvrl "#vry", %2\n" \
+ "fmtvrh "#vry", %3\n"
+
+#define STW_FPU_REGS(a, b, c, d) \
+ "stw %0, (%4, "#a")\n" \
+ "stw %1, (%4, "#b")\n" \
+ "stw %2, (%4, "#c")\n" \
+ "stw %3, (%4, "#d")\n"
+
+#define LDW_FPU_REGS(a, b, c, d) \
+ "ldw %0, (%4, "#a")\n" \
+ "ldw %1, (%4, "#b")\n" \
+ "ldw %2, (%4, "#c")\n" \
+ "ldw %3, (%4, "#d")\n"
+
+void save_to_user_fp(struct user_fp *user_fp)
+{
+ unsigned long flg;
+ unsigned long tmp1, tmp2;
+ unsigned long *fpregs;
+
+ local_irq_save(flg);
+
+ tmp1 = mfcr("cr<1, 2>");
+ tmp2 = mfcr("cr<2, 2>");
+
+ user_fp->fcr = tmp1;
+ user_fp->fesr = tmp2;
+
+ fpregs = &user_fp->vr[0];
+#ifdef CONFIG_CPU_HAS_FPUV2
+#ifdef CONFIG_CPU_HAS_VDSP
+ asm volatile(
+ "vstmu.32 vr0-vr3, (%0)\n"
+ "vstmu.32 vr4-vr7, (%0)\n"
+ "vstmu.32 vr8-vr11, (%0)\n"
+ "vstmu.32 vr12-vr15, (%0)\n"
+ "fstmu.64 vr16-vr31, (%0)\n"
+ : "+a"(fpregs)
+ ::"memory");
+#else
+ asm volatile(
+ "fstmu.64 vr0-vr31, (%0)\n"
+ : "+a"(fpregs)
+ ::"memory");
+#endif
+#else
+ {
+ unsigned long tmp3, tmp4;
+
+ asm volatile(
+ FMFVR_FPU_REGS(vr0, vr1)
+ STW_FPU_REGS(0, 4, 16, 20)
+ FMFVR_FPU_REGS(vr2, vr3)
+ STW_FPU_REGS(32, 36, 48, 52)
+ FMFVR_FPU_REGS(vr4, vr5)
+ STW_FPU_REGS(64, 68, 80, 84)
+ FMFVR_FPU_REGS(vr6, vr7)
+ STW_FPU_REGS(96, 100, 112, 116)
+ "addi %4, 128\n"
+ FMFVR_FPU_REGS(vr8, vr9)
+ STW_FPU_REGS(0, 4, 16, 20)
+ FMFVR_FPU_REGS(vr10, vr11)
+ STW_FPU_REGS(32, 36, 48, 52)
+ FMFVR_FPU_REGS(vr12, vr13)
+ STW_FPU_REGS(64, 68, 80, 84)
+ FMFVR_FPU_REGS(vr14, vr15)
+ STW_FPU_REGS(96, 100, 112, 116)
+ : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
+ "=a"(tmp4), "+a"(fpregs)
+ ::"memory");
+ }
+#endif
+
+ local_irq_restore(flg);
+}
+
+void restore_from_user_fp(struct user_fp *user_fp)
+{
+ unsigned long flg;
+ unsigned long tmp1, tmp2;
+ unsigned long *fpregs;
+
+ local_irq_save(flg);
+
+ tmp1 = user_fp->fcr;
+ tmp2 = user_fp->fesr;
+
+ mtcr("cr<1, 2>", tmp1);
+ mtcr("cr<2, 2>", tmp2);
+
+ fpregs = &user_fp->vr[0];
+#ifdef CONFIG_CPU_HAS_FPUV2
+#ifdef CONFIG_CPU_HAS_VDSP
+ asm volatile(
+ "vldmu.32 vr0-vr3, (%0)\n"
+ "vldmu.32 vr4-vr7, (%0)\n"
+ "vldmu.32 vr8-vr11, (%0)\n"
+ "vldmu.32 vr12-vr15, (%0)\n"
+ "fldmu.64 vr16-vr31, (%0)\n"
+ : "+a"(fpregs)
+ ::"memory");
+#else
+ asm volatile(
+ "fldmu.64 vr0-vr31, (%0)\n"
+ : "+a"(fpregs)
+ ::"memory");
+#endif
+#else
+ {
+ unsigned long tmp3, tmp4;
+
+ asm volatile(
+ LDW_FPU_REGS(0, 4, 16, 20)
+ FMTVR_FPU_REGS(vr0, vr1)
+ LDW_FPU_REGS(32, 36, 48, 52)
+ FMTVR_FPU_REGS(vr2, vr3)
+ LDW_FPU_REGS(64, 68, 80, 84)
+ FMTVR_FPU_REGS(vr4, vr5)
+ LDW_FPU_REGS(96, 100, 112, 116)
+ FMTVR_FPU_REGS(vr6, vr7)
+ "addi %4, 128\n"
+ LDW_FPU_REGS(0, 4, 16, 20)
+ FMTVR_FPU_REGS(vr8, vr9)
+ LDW_FPU_REGS(32, 36, 48, 52)
+ FMTVR_FPU_REGS(vr10, vr11)
+ LDW_FPU_REGS(64, 68, 80, 84)
+ FMTVR_FPU_REGS(vr12, vr13)
+ LDW_FPU_REGS(96, 100, 112, 116)
+ FMTVR_FPU_REGS(vr14, vr15)
+ : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3),
+ "=a"(tmp4), "+a"(fpregs)
+ ::"memory");
+ }
+#endif
+ local_irq_restore(flg);
+}
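
fpu_libc_helper() above reassembles the trapping 32-bit instruction from two halfwords (the first halfword lands in the upper 16 bits of tinstr, matching the C-SKY instruction-stream layout) and pattern-matches it against the mtcr/mfcr encodings with MTCR_MASK/MFCR_MASK. A stand-alone sketch of the same decode follows; the sample word is constructed from the constants above, not taken from a real binary.

#include <stdint.h>
#include <stdio.h>

#define MTCR_MASK 0xFC00FFE0
#define MTCR_DIST 0xC0006420

int main(void)
{
	/* Build an mtcr pattern: per the checks in fpu_libc_helper(),
	 * bits 25-21 must be 2, bits 20-16 carry the GPR index (as
	 * used by *(&regs->a0 + index)), and bits 4-0 select
	 * cr<1, 2> (fcr) vs cr<2, 2> (fesr). */
	uint32_t tinstr = MTCR_DIST | (2u << 21) | (5u << 16) | 1u;

	if ((tinstr & MTCR_MASK) == MTCR_DIST &&
	    ((tinstr >> 21) & 0x1F) == 2) {
		unsigned int index = (tinstr >> 16) & 0x1F;
		unsigned int sel = tinstr & 0x1F;

		printf("mtcr: source reg a0+%u, cr<%u, 2>\n", index, sel);
	}
	return 0;
}
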
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
new file mode 100644
index 000000000..a565e00c3
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_CACHEFLUSH_H
+#define __ABI_CSKY_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change,
+ * because the cache is mapped to physical memory, not virtual memory.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+
+#define PG_dcache_clean PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
+
+#define flush_icache_range(start, end) cache_wbinv_range(start, end)
+
+void flush_icache_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) { \
+ dcache_wb_range((unsigned long)dst, \
+ (unsigned long)dst + len); \
+ flush_icache_mm_range(current->mm, \
+ (unsigned long)dst, \
+ (unsigned long)dst + len); \
+ } \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
new file mode 100644
index 000000000..64215f238
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/ckmmu.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CKMMUV2_H
+#define __ASM_CSKY_CKMMUV2_H
+
+#include <abi/reg_ops.h>
+#include <asm/barrier.h>
+
+static inline int read_mmu_index(void)
+{
+ return mfcr("cr<0, 15>");
+}
+
+static inline void write_mmu_index(int value)
+{
+ mtcr("cr<0, 15>", value);
+}
+
+static inline int read_mmu_entrylo0(void)
+{
+ return mfcr("cr<2, 15>");
+}
+
+static inline int read_mmu_entrylo1(void)
+{
+ return mfcr("cr<3, 15>");
+}
+
+static inline void write_mmu_pagemask(int value)
+{
+ mtcr("cr<6, 15>", value);
+}
+
+static inline int read_mmu_entryhi(void)
+{
+ return mfcr("cr<4, 15>");
+}
+
+static inline void write_mmu_entryhi(int value)
+{
+ mtcr("cr<4, 15>", value);
+}
+
+static inline unsigned long read_mmu_msa0(void)
+{
+ return mfcr("cr<30, 15>");
+}
+
+static inline void write_mmu_msa0(unsigned long value)
+{
+ mtcr("cr<30, 15>", value);
+}
+
+static inline unsigned long read_mmu_msa1(void)
+{
+ return mfcr("cr<31, 15>");
+}
+
+static inline void write_mmu_msa1(unsigned long value)
+{
+ mtcr("cr<31, 15>", value);
+}
+
+/*
+ * TLB operations.
+ */
+static inline void tlb_probe(void)
+{
+ mtcr("cr<8, 15>", 0x80000000);
+}
+
+static inline void tlb_read(void)
+{
+ mtcr("cr<8, 15>", 0x40000000);
+}
+
+static inline void tlb_invalid_all(void)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
+ asm volatile(
+ "tlbi.alls \n"
+ "sync.i \n"
+ :
+ :
+ : "memory");
+#else
+ mtcr("cr<8, 15>", 0x04000000);
+#endif
+}
+
+static inline void local_tlb_invalid_all(void)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
+ asm volatile(
+ "tlbi.all \n"
+ "sync.i \n"
+ :
+ :
+ : "memory");
+#else
+ tlb_invalid_all();
+#endif
+}
+
+static inline void tlb_invalid_indexed(void)
+{
+ mtcr("cr<8, 15>", 0x02000000);
+}
+
+#define NOP32 ".long 0x4820c400\n"
+
+static inline void setup_pgd(pgd_t *pgd, int asid)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
+#else
+ mb();
+#endif
+ asm volatile(
+#ifdef CONFIG_CPU_HAS_TLBI
+ "mtcr %1, cr<28, 15> \n"
+#endif
+ "mtcr %1, cr<29, 15> \n"
+ "mtcr %0, cr< 4, 15> \n"
+ ".rept 64 \n"
+ NOP32
+ ".endr \n"
+ :
+ :"r"(asid), "r"(__pa(pgd) | BIT(0))
+ :"memory");
+}
+
+static inline pgd_t *get_pgd(void)
+{
+ return __va(mfcr("cr<29, 15>") & ~BIT(0));
+}
+#endif /* __ASM_CSKY_CKMMUV2_H */
diff --git a/arch/csky/abiv2/inc/abi/elf.h b/arch/csky/abiv2/inc/abi/elf.h
new file mode 100644
index 000000000..290f49ef4
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/elf.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_ELF_H
+#define __ABI_CSKY_ELF_H
+
+/* The ordering of members in pr_reg[x] is defined by GDB. */
+#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \
+ pr_reg[0] = regs->pc; \
+ pr_reg[1] = regs->a1; \
+ pr_reg[2] = regs->a0; \
+ pr_reg[3] = regs->sr; \
+ pr_reg[4] = regs->a2; \
+ pr_reg[5] = regs->a3; \
+ pr_reg[6] = regs->regs[0]; \
+ pr_reg[7] = regs->regs[1]; \
+ pr_reg[8] = regs->regs[2]; \
+ pr_reg[9] = regs->regs[3]; \
+ pr_reg[10] = regs->regs[4]; \
+ pr_reg[11] = regs->regs[5]; \
+ pr_reg[12] = regs->regs[6]; \
+ pr_reg[13] = regs->regs[7]; \
+ pr_reg[14] = regs->regs[8]; \
+ pr_reg[15] = regs->regs[9]; \
+ pr_reg[16] = regs->usp; \
+ pr_reg[17] = regs->lr; \
+ pr_reg[18] = regs->exregs[0]; \
+ pr_reg[19] = regs->exregs[1]; \
+ pr_reg[20] = regs->exregs[2]; \
+ pr_reg[21] = regs->exregs[3]; \
+ pr_reg[22] = regs->exregs[4]; \
+ pr_reg[23] = regs->exregs[5]; \
+ pr_reg[24] = regs->exregs[6]; \
+ pr_reg[25] = regs->exregs[7]; \
+ pr_reg[26] = regs->exregs[8]; \
+ pr_reg[27] = regs->exregs[9]; \
+ pr_reg[28] = regs->exregs[10]; \
+ pr_reg[29] = regs->exregs[11]; \
+ pr_reg[30] = regs->exregs[12]; \
+ pr_reg[31] = regs->exregs[13]; \
+ pr_reg[32] = regs->exregs[14]; \
+ pr_reg[33] = regs->tls; \
+} while (0);
+#endif /* __ABI_CSKY_ELF_H */
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
new file mode 100644
index 000000000..cca63e699
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ENTRY_H
+#define __ASM_CSKY_ENTRY_H
+
+#include <asm/setup.h>
+#include <abi/regdef.h>
+
+#define LSAVE_PC 8
+#define LSAVE_PSR 12
+#define LSAVE_A0 24
+#define LSAVE_A1 28
+#define LSAVE_A2 32
+#define LSAVE_A3 36
+#define LSAVE_A4 40
+#define LSAVE_A5 44
+
+#define KSPTOUSP
+#define USPTOKSP
+
+#define usp cr<14, 1>
+
+.macro SAVE_ALL epc_inc
+ subi sp, 152
+ stw tls, (sp, 0)
+ stw lr, (sp, 4)
+
+ RD_MEH lr
+ WR_MEH lr
+
+ mfcr lr, epc
+ movi tls, \epc_inc
+ add lr, tls
+ stw lr, (sp, 8)
+
+ mfcr lr, epsr
+ stw lr, (sp, 12)
+ btsti lr, 31
+ bf 1f
+ addi lr, sp, 152
+ br 2f
+1:
+ mfcr lr, usp
+2:
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+ stw a0, (sp, 24)
+ stw a1, (sp, 28)
+ stw a2, (sp, 32)
+ stw a3, (sp, 36)
+
+ addi sp, 40
+ stm r4-r13, (sp)
+
+ addi sp, 40
+ stm r16-r30, (sp)
+#ifdef CONFIG_CPU_HAS_HILO
+ mfhi lr
+ stw lr, (sp, 60)
+ mflo lr
+ stw lr, (sp, 64)
+ mfcr lr, cr14
+ stw lr, (sp, 68)
+#endif
+ subi sp, 80
+.endm
+
+.macro RESTORE_ALL
+ ldw tls, (sp, 0)
+ ldw lr, (sp, 4)
+ ldw a0, (sp, 8)
+ mtcr a0, epc
+ ldw a0, (sp, 12)
+ mtcr a0, epsr
+ btsti a0, 31
+ ldw a0, (sp, 16)
+ mtcr a0, usp
+ mtcr a0, ss0
+
+#ifdef CONFIG_CPU_HAS_HILO
+ ldw a0, (sp, 140)
+ mthi a0
+ ldw a0, (sp, 144)
+ mtlo a0
+ ldw a0, (sp, 148)
+ mtcr a0, cr14
+#endif
+
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+ ldw a3, (sp, 36)
+
+ addi sp, 40
+ ldm r4-r13, (sp)
+ addi sp, 40
+ ldm r16-r30, (sp)
+ addi sp, 72
+ bf 1f
+ mfcr sp, ss0
+1:
+ rte
+.endm
+
+.macro SAVE_REGS_FTRACE
+ subi sp, 152
+ stw tls, (sp, 0)
+ stw lr, (sp, 4)
+
+ mfcr lr, psr
+ stw lr, (sp, 12)
+
+ addi lr, sp, 152
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+ stw a0, (sp, 24)
+ stw a1, (sp, 28)
+ stw a2, (sp, 32)
+ stw a3, (sp, 36)
+
+ addi sp, 40
+ stm r4-r13, (sp)
+
+ addi sp, 40
+ stm r16-r30, (sp)
+#ifdef CONFIG_CPU_HAS_HILO
+ mfhi lr
+ stw lr, (sp, 60)
+ mflo lr
+ stw lr, (sp, 64)
+ mfcr lr, cr14
+ stw lr, (sp, 68)
+#endif
+ subi sp, 80
+.endm
+
+.macro RESTORE_REGS_FTRACE
+ ldw tls, (sp, 0)
+
+#ifdef CONFIG_CPU_HAS_HILO
+ ldw a0, (sp, 140)
+ mthi a0
+ ldw a0, (sp, 144)
+ mtlo a0
+ ldw a0, (sp, 148)
+ mtcr a0, cr14
+#endif
+
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+ ldw a3, (sp, 36)
+
+ addi sp, 40
+ ldm r4-r13, (sp)
+ addi sp, 40
+ ldm r16-r30, (sp)
+ addi sp, 72
+.endm
+
+.macro SAVE_SWITCH_STACK
+ subi sp, 64
+ stm r4-r11, (sp)
+ stw lr, (sp, 32)
+ stw r16, (sp, 36)
+ stw r17, (sp, 40)
+ stw r26, (sp, 44)
+ stw r27, (sp, 48)
+ stw r28, (sp, 52)
+ stw r29, (sp, 56)
+ stw r30, (sp, 60)
+#ifdef CONFIG_CPU_HAS_HILO
+ subi sp, 16
+ mfhi lr
+ stw lr, (sp, 0)
+ mflo lr
+ stw lr, (sp, 4)
+ mfcr lr, cr14
+ stw lr, (sp, 8)
+#endif
+.endm
+
+.macro RESTORE_SWITCH_STACK
+#ifdef CONFIG_CPU_HAS_HILO
+ ldw lr, (sp, 0)
+ mthi lr
+ ldw lr, (sp, 4)
+ mtlo lr
+ ldw lr, (sp, 8)
+ mtcr lr, cr14
+ addi sp, 16
+#endif
+ ldm r4-r11, (sp)
+ ldw lr, (sp, 32)
+ ldw r16, (sp, 36)
+ ldw r17, (sp, 40)
+ ldw r26, (sp, 44)
+ ldw r27, (sp, 48)
+ ldw r28, (sp, 52)
+ ldw r29, (sp, 56)
+ ldw r30, (sp, 60)
+ addi sp, 64
+.endm
+
+/* MMU registers operators. */
+.macro RD_MIR rx
+ mfcr \rx, cr<0, 15>
+.endm
+
+.macro RD_MEH rx
+ mfcr \rx, cr<4, 15>
+.endm
+
+.macro RD_MCIR rx
+ mfcr \rx, cr<8, 15>
+.endm
+
+.macro RD_PGDR rx
+ mfcr \rx, cr<29, 15>
+.endm
+
+.macro RD_PGDR_K rx
+ mfcr \rx, cr<28, 15>
+.endm
+
+.macro WR_MEH rx
+ mtcr \rx, cr<4, 15>
+.endm
+
+.macro WR_MCIR rx
+ mtcr \rx, cr<8, 15>
+.endm
+
+#ifdef CONFIG_PAGE_OFFSET_80000000
+#define MSA_SET cr<30, 15>
+#define MSA_CLR cr<31, 15>
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+#define MSA_SET cr<31, 15>
+#define MSA_CLR cr<30, 15>
+#endif
+
+.macro SETUP_MMU
+ /* Init psr and enable ee */
+ lrw r6, DEFAULT_PSR_VALUE
+ mtcr r6, psr
+ psrset ee
+
+	/* Invalidate I/D-cache, BTB and BHT */
+ movi r6, 7
+ lsli r6, 16
+ addi r6, (1<<4) | 3
+ mtcr r6, cr17
+
+	/* Invalidate all TLB entries */
+ bgeni r6, 26
+ mtcr r6, cr<8, 15> /* Set MCIR */
+
+ /* Check MMU on/off */
+ mfcr r6, cr18
+ btsti r6, 0
+ bt 1f
+
+	/* MMU off: set up a mapping TLB entry */
+ movi r6, 0
+ mtcr r6, cr<6, 15> /* Set MPR with 4K page size */
+
+ grs r6, 1f /* Get current pa by PC */
+ bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */
+ andn r6, r7
+ mtcr r6, cr<4, 15> /* Set MEH */
+
+ mov r8, r6
+ movi r7, 0x00000006
+ or r8, r7
+ mtcr r8, cr<2, 15> /* Set MEL0 */
+ movi r7, 0x00001006
+ or r8, r7
+ mtcr r8, cr<3, 15> /* Set MEL1 */
+
+ bgeni r8, 28
+ mtcr r8, cr<8, 15> /* Set MCIR to write TLB */
+
+ br 2f
+1:
+ /*
+	 * MMU on: use original MSA value from bootloader
+ *
+ * cr<30/31, 15> MSA register format:
+ * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * BA Reserved SH WA B SO SEC C D V
+ */
+ mfcr r6, MSA_SET /* Get MSA */
+2:
+ lsri r6, 29
+ lsli r6, 29
+ addi r6, 0x1ce
+ mtcr r6, MSA_SET /* Set MSA */
+
+ movi r6, 0
+ mtcr r6, MSA_CLR /* Clr MSA */
+
+ /* enable MMU */
+ mfcr r6, cr18
+ bseti r6, 0
+ mtcr r6, cr18
+
+ jmpi 3f /* jump to va */
+3:
+.endm
+#endif /* __ASM_CSKY_ENTRY_H */
diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h
new file mode 100644
index 000000000..aabb79355
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/fpu.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FPU_H
+#define __ASM_CSKY_FPU_H
+
+#include <asm/sigcontext.h>
+#include <asm/ptrace.h>
+
+int fpu_libc_helper(struct pt_regs *regs);
+void fpu_fpe(struct pt_regs *regs);
+
+static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); }
+
+void save_to_user_fp(struct user_fp *user_fp);
+void restore_from_user_fp(struct user_fp *user_fp);
+
+/*
+ * Define the fesr bits for FPE handling.
+ */
+#define FPE_ILLE (1 << 16) /* Illegal instruction */
+#define FPE_FEC (1 << 7) /* Input floating-point arithmetic exception */
+#define FPE_IDC (1 << 5) /* Input denormalized exception */
+#define FPE_IXC (1 << 4) /* Inexact exception */
+#define FPE_UFC (1 << 3) /* Underflow exception */
+#define FPE_OFC (1 << 2) /* Overflow exception */
+#define FPE_DZC (1 << 1) /* Divide by zero exception */
+#define FPE_IOC (1 << 0) /* Invalid operation exception */
+#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC)
+
+#ifdef CONFIG_OPEN_FPU_IDE
+#define IDE_STAT (1 << 5)
+#else
+#define IDE_STAT 0
+#endif
+
+#ifdef CONFIG_OPEN_FPU_IXE
+#define IXE_STAT (1 << 4)
+#else
+#define IXE_STAT 0
+#endif
+
+#ifdef CONFIG_OPEN_FPU_UFE
+#define UFE_STAT (1 << 3)
+#else
+#define UFE_STAT 0
+#endif
+
+#ifdef CONFIG_OPEN_FPU_OFE
+#define OFE_STAT (1 << 2)
+#else
+#define OFE_STAT 0
+#endif
+
+#ifdef CONFIG_OPEN_FPU_DZE
+#define DZE_STAT (1 << 1)
+#else
+#define DZE_STAT 0
+#endif
+
+#ifdef CONFIG_OPEN_FPU_IOE
+#define IOE_STAT (1 << 0)
+#else
+#define IOE_STAT 0
+#endif
+
+#endif /* __ASM_CSKY_FPU_H */
diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h
new file mode 100644
index 000000000..cf005f13c
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/page.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+static inline void clear_user_page(void *addr, unsigned long vaddr,
+ struct page *page)
+{
+ clear_page(addr);
+}
+
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *page)
+{
+ copy_page(to, from);
+}
diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h
new file mode 100644
index 000000000..7e7f389f5
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PGTABLE_BITS_H
+#define __ASM_CSKY_PGTABLE_BITS_H
+
+/* implemented in software */
+#define _PAGE_ACCESSED (1<<7)
+#define _PAGE_READ (1<<8)
+#define _PAGE_WRITE (1<<9)
+#define _PAGE_PRESENT (1<<10)
+#define _PAGE_MODIFIED (1<<11)
+
+/* implemented in hardware */
+#define _PAGE_GLOBAL (1<<0)
+#define _PAGE_VALID (1<<1)
+#define _PAGE_DIRTY (1<<2)
+
+#define _PAGE_SO (1<<5)
+#define _PAGE_BUF (1<<6)
+#define _PAGE_CACHE (1<<3)
+#define _CACHE_MASK _PAGE_CACHE
+
+#define _CACHE_CACHED (_PAGE_CACHE | _PAGE_BUF)
+#define _CACHE_UNCACHED (0)
+
+#define _PAGE_PROT_NONE _PAGE_WRITE
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_GLOBAL (zero)
+ * bit 1: _PAGE_VALID (zero)
+ * bit 2 - 6: swap type
+ * bit 7 - 8: swap offset[0 - 1]
+ * bit 9: _PAGE_WRITE (zero)
+ * bit 10: _PAGE_PRESENT (zero)
+ * bit 11 - 31: swap offset[2 - 22]
+ */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((((x).val >> 7) & 0x3) | \
+ (((x).val >> 9) & 0x7ffffc))
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type & 0x1f) << 2) | \
+ ((offset & 0x3) << 7) | \
+ ((offset & 0x7ffffc) << 9)})
+
+#endif /* __ASM_CSKY_PGTABLE_BITS_H */
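
As a sanity check on the layout documented above: the encode/decode macros round-trip a (type, offset) pair while keeping _PAGE_GLOBAL, _PAGE_VALID, _PAGE_WRITE and _PAGE_PRESENT clear, so a swap PTE can never look valid or present. A self-contained check, with swp_entry_t redeclared locally for the demo:

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)	(((x).val >> 2) & 0x1f)
#define __swp_offset(x)	((((x).val >> 7) & 0x3) | \
			 (((x).val >> 9) & 0x7ffffc))
#define __swp_entry(type, offset) ((swp_entry_t) { \
			((type & 0x1f) << 2) | \
			((offset & 0x3) << 7) | \
			((offset & 0x7ffffc) << 9)})

int main(void)
{
	swp_entry_t e = __swp_entry(3, 0x12345);

	assert(__swp_type(e) == 3);
	assert(__swp_offset(e) == 0x12345);
	/* Bits 0/1 (_PAGE_GLOBAL/_PAGE_VALID) and 9/10
	 * (_PAGE_WRITE/_PAGE_PRESENT) stay clear. */
	assert((e.val & ((1 << 0) | (1 << 1) | (1 << 9) | (1 << 10))) == 0);
	return 0;
}
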
diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h
new file mode 100644
index 000000000..49ba18a64
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/reg_ops.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_REG_OPS_H
+#define __ABI_REG_OPS_H
+#include <asm/reg_ops.h>
+
+static inline unsigned int mfcr_hint(void)
+{
+ return mfcr("cr31");
+}
+
+static inline unsigned int mfcr_ccr2(void)
+{
+ return mfcr("cr23");
+}
+#endif /* __ABI_REG_OPS_H */
diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h
new file mode 100644
index 000000000..0933addbc
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/regdef.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_REGDEF_H
+#define __ASM_CSKY_REGDEF_H
+
+#ifdef __ASSEMBLY__
+#define syscallid r7
+#else
+#define syscallid "r7"
+#endif
+
+#define regs_syscallid(regs) regs->regs[3]
+#define regs_fp(regs) regs->regs[4]
+
+/*
+ * PSR format:
+ * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 |
+ *   S            VEC      TM             MM
+ *
+ * S: Super Mode
+ * VEC: Exception Number
+ * TM: Trace Mode
+ * MM: Memory unaligned addr access
+ */
+#define DEFAULT_PSR_VALUE 0x80000200
+
+#define SYSTRACE_SAVENUM 5
+
+#define TRAP0_SIZE 4
+
+#endif /* __ASM_CSKY_REGDEF_H */
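
Reading DEFAULT_PSR_VALUE against the field map above: bit 31 is S (super mode) and bit 9 is MM (allow unaligned memory access); VEC, TM and everything else start as zero. A one-line check of that decomposition:

#include <assert.h>

#define DEFAULT_PSR_VALUE 0x80000200

int main(void)
{
	/* S (bit 31) | MM (bit 9); the VEC and TM fields are zero. */
	assert(DEFAULT_PSR_VALUE == ((1u << 31) | (1u << 9)));
	return 0;
}
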
diff --git a/arch/csky/abiv2/inc/abi/string.h b/arch/csky/abiv2/inc/abi/string.h
new file mode 100644
index 000000000..f01bad2ac
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/string.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_STRING_H
+#define __ABI_CSKY_STRING_H
+
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_STRCMP
+extern int strcmp(const char *, const char *);
+
+#define __HAVE_ARCH_STRCPY
+extern char *strcpy(char *, const char *);
+
+#define __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __ABI_CSKY_STRING_H */
diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h
new file mode 100644
index 000000000..5dd5c3f4e
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/switch_context.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_PTRACE_H
+#define __ABI_CSKY_PTRACE_H
+
+struct switch_stack {
+#ifdef CONFIG_CPU_HAS_HILO
+ unsigned long rhi;
+ unsigned long rlo;
+ unsigned long cr14;
+ unsigned long pad;
+#endif
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r26;
+ unsigned long r27;
+ unsigned long r28;
+ unsigned long r29;
+ unsigned long r30;
+};
+#endif /* __ABI_CSKY_PTRACE_H */
diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h
new file mode 100644
index 000000000..40fd10d89
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/vdso.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_VDSO_H
+#define __ABI_CSKY_VDSO_H
+
+/* movi r7, 173 */
+#define SET_SYSCALL_ID .long 0x008bea07
+
+#endif /* __ABI_CSKY_VDSO_H */
diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S
new file mode 100644
index 000000000..d745e10c1
--- /dev/null
+++ b/arch/csky/abiv2/mcount.S
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+#include <abi/entry.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * csky-gcc with -pg will put the following asm after prologue:
+ * push r15
+ * jsri _mcount
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp => 0:+-------+
+ * | a0-a3 | -> must save all argument regs
+ * +16:+-------+
+ * | lr | -> _mcount lr (instrumented function's pc)
+ * +20:+-------+
+ * | fp=r8 | -> instrumented function fp
+ * +24:+-------+
+ * | plr | -> instrumented function lr (parent's pc)
+ * +-------+
+ */
+
+.macro mcount_enter
+ subi sp, 24
+ stw a0, (sp, 0)
+ stw a1, (sp, 4)
+ stw a2, (sp, 8)
+ stw a3, (sp, 12)
+ stw lr, (sp, 16)
+ stw r8, (sp, 20)
+.endm
+
+.macro mcount_exit
+ ldw a0, (sp, 0)
+ ldw a1, (sp, 4)
+ ldw a2, (sp, 8)
+ ldw a3, (sp, 12)
+ ldw t1, (sp, 16)
+ ldw r8, (sp, 20)
+ ldw lr, (sp, 24)
+ addi sp, 28
+ jmp t1
+.endm
+
+.macro mcount_enter_regs
+ subi sp, 8
+ stw lr, (sp, 0)
+ stw r8, (sp, 4)
+ SAVE_REGS_FTRACE
+.endm
+
+.macro mcount_exit_regs
+ RESTORE_REGS_FTRACE
+ subi sp, 152
+ ldw t1, (sp, 4)
+ addi sp, 152
+ ldw r8, (sp, 4)
+ ldw lr, (sp, 8)
+ addi sp, 12
+ jmp t1
+.endm
+
+.macro save_return_regs
+ subi sp, 16
+ stw a0, (sp, 0)
+ stw a1, (sp, 4)
+ stw a2, (sp, 8)
+ stw a3, (sp, 12)
+.endm
+
+.macro restore_return_regs
+ mov lr, a0
+ ldw a0, (sp, 0)
+ ldw a1, (sp, 4)
+ ldw a2, (sp, 8)
+ ldw a3, (sp, 12)
+ addi sp, 16
+.endm
+
+.macro nop32_stub
+ nop32
+ nop32
+ nop32
+.endm
+
+ENTRY(ftrace_stub)
+ jmp lr
+END(ftrace_stub)
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+ENTRY(_mcount)
+ mcount_enter
+
+ /* r26 is link register, only used with jsri translation */
+ lrw r26, ftrace_trace_function
+ ldw r26, (r26, 0)
+ lrw a1, ftrace_stub
+ cmpne r26, a1
+ bf skip_ftrace
+
+ mov a0, lr
+ subi a0, 4
+ ldw a1, (sp, 24)
+ lrw a2, function_trace_op
+ ldw a2, (a2, 0)
+
+ jsr r26
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace:
+ mcount_exit
+#else
+skip_ftrace:
+ lrw a0, ftrace_graph_return
+ ldw a0, (a0, 0)
+ lrw a1, ftrace_stub
+ cmpne a0, a1
+ bt ftrace_graph_caller
+
+ lrw a0, ftrace_graph_entry
+ ldw a0, (a0, 0)
+ lrw a1, ftrace_graph_entry_stub
+ cmpne a0, a1
+ bt ftrace_graph_caller
+
+ mcount_exit
+#endif
+END(_mcount)
+#else /* CONFIG_DYNAMIC_FTRACE */
+ENTRY(_mcount)
+ mov t1, lr
+ ldw lr, (sp, 0)
+ addi sp, 4
+ jmp t1
+ENDPROC(_mcount)
+
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ ldw a0, (sp, 16)
+ subi a0, 4
+ ldw a1, (sp, 24)
+ lrw a2, function_trace_op
+ ldw a2, (a2, 0)
+
+ nop
+GLOBAL(ftrace_call)
+ nop32_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ nop
+GLOBAL(ftrace_graph_call)
+ nop32_stub
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+ mov a0, sp
+ addi a0, 24
+ ldw a1, (sp, 16)
+ subi a1, 4
+ mov a2, r8
+ lrw r26, prepare_ftrace_return
+ jsr r26
+ mcount_exit
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+ save_return_regs
+ mov a0, r8
+ jsri ftrace_return_to_handler
+ restore_return_regs
+ jmp lr
+END(return_to_handler)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+ mcount_enter_regs
+
+ lrw t1, PT_FRAME_SIZE
+ add t1, sp
+
+ ldw a0, (t1, 0)
+ subi a0, 4
+ ldw a1, (t1, 8)
+ lrw a2, function_trace_op
+ ldw a2, (a2, 0)
+ mov a3, sp
+
+ nop
+GLOBAL(ftrace_regs_call)
+ nop32_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ nop
+GLOBAL(ftrace_graph_regs_call)
+ nop32_stub
+#endif
+
+ mcount_exit_regs
+ENDPROC(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
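
For orientation, the argument registers that ftrace_caller and ftrace_regs_caller set up before hitting the patchable nop32_stub sites map onto the generic ftrace callback: a0 is the callsite address (the saved lr minus the 4-byte jsri), a1 the parent return address, a2 the dereferenced function_trace_op, and a3 the saved pt_regs in the _WITH_REGS variant. The sketch below is my reading of the pt_regs-era ftrace callback shape this code targets, not copied from this tree's headers:

struct ftrace_ops;	/* opaque here; defined in linux/ftrace.h */
struct pt_regs;

/* a0..a3 carry the first four arguments in the C-SKY ABI. */
typedef void (*ftrace_func_t)(unsigned long ip,		/* a0 */
			      unsigned long parent_ip,	/* a1 */
			      struct ftrace_ops *op,	/* a2 */
			      struct pt_regs *regs);	/* a3 */
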
diff --git a/arch/csky/abiv2/memcmp.S b/arch/csky/abiv2/memcmp.S
new file mode 100644
index 000000000..bf0d809f0
--- /dev/null
+++ b/arch/csky/abiv2/memcmp.S
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ENTRY(memcmp)
+ /* Test if len less than 4 bytes. */
+ mov r3, r0
+ movi r0, 0
+ mov r12, r4
+ cmplti r2, 4
+ bt .L_compare_by_byte
+
+ andi r13, r0, 3
+ movi r19, 4
+
+ /* Test if s1 is not 4 bytes aligned. */
+ bnez r13, .L_s1_not_aligned
+
+ LABLE_ALIGN
+.L_s1_aligned:
+	/* If s1 is aligned, compare word by word. */
+ zext r18, r2, 31, 4
+ /* Test if len less than 16 bytes. */
+ bez r18, .L_compare_by_word
+
+.L_compare_by_4word:
+ /* If aligned, load word each time. */
+ ldw r20, (r3, 0)
+ ldw r21, (r1, 0)
+ /* If s1[i] != s2[i], goto .L_byte_check. */
+ cmpne r20, r21
+ bt .L_byte_check
+
+ ldw r20, (r3, 4)
+ ldw r21, (r1, 4)
+ cmpne r20, r21
+ bt .L_byte_check
+
+ ldw r20, (r3, 8)
+ ldw r21, (r1, 8)
+ cmpne r20, r21
+ bt .L_byte_check
+
+ ldw r20, (r3, 12)
+ ldw r21, (r1, 12)
+ cmpne r20, r21
+ bt .L_byte_check
+
+ PRE_BNEZAD (r18)
+ addi a3, 16
+ addi a1, 16
+
+ BNEZAD (r18, .L_compare_by_4word)
+
+.L_compare_by_word:
+ zext r18, r2, 3, 2
+ bez r18, .L_compare_by_byte
+.L_compare_by_word_loop:
+ ldw r20, (r3, 0)
+ ldw r21, (r1, 0)
+ addi r3, 4
+ PRE_BNEZAD (r18)
+ cmpne r20, r21
+ addi r1, 4
+ bt .L_byte_check
+ BNEZAD (r18, .L_compare_by_word_loop)
+
+.L_compare_by_byte:
+ zext r18, r2, 1, 0
+ bez r18, .L_return
+.L_compare_by_byte_loop:
+ ldb r0, (r3, 0)
+ ldb r4, (r1, 0)
+ addi r3, 1
+ subu r0, r4
+ PRE_BNEZAD (r18)
+ addi r1, 1
+ bnez r0, .L_return
+ BNEZAD (r18, .L_compare_by_byte_loop)
+
+.L_return:
+ mov r4, r12
+ rts
+
+# ifdef __CSKYBE__
+/* s1[i] != s2[i] in word, so we check byte 0. */
+.L_byte_check:
+ xtrb0 r0, r20
+ xtrb0 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 1 */
+ xtrb1 r0, r20
+ xtrb1 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 2 */
+ xtrb2 r0, r20
+ xtrb2 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 3 */
+ xtrb3 r0, r20
+ xtrb3 r2, r21
+ subu r0, r2
+# else
+/* s1[i] != s2[i] in word, so we check byte 3. */
+.L_byte_check:
+ xtrb3 r0, r20
+ xtrb3 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 2 */
+ xtrb2 r0, r20
+ xtrb2 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 1 */
+ xtrb1 r0, r20
+ xtrb1 r2, r21
+ subu r0, r2
+ bnez r0, .L_return
+
+ /* check byte 0 */
+ xtrb0 r0, r20
+ xtrb0 r2, r21
+ subu r0, r2
+ br .L_return
+# endif /* !__CSKYBE__ */
+
+/* Compare when s1 is not aligned. */
+.L_s1_not_aligned:
+ sub r13, r19, r13
+ sub r2, r13
+.L_s1_not_aligned_loop:
+ ldb r0, (r3, 0)
+ ldb r4, (r1, 0)
+ addi r3, 1
+ subu r0, r4
+ PRE_BNEZAD (r13)
+ addi r1, 1
+ bnez r0, .L_return
+ BNEZAD (r13, .L_s1_not_aligned_loop)
+ br .L_s1_aligned
+ENDPROC(memcmp)
diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S
new file mode 100644
index 000000000..145bf3a93
--- /dev/null
+++ b/arch/csky/abiv2/memcpy.S
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ENTRY(__memcpy)
+ENTRY(memcpy)
+ /* Test if len less than 4 bytes. */
+ mov r12, r0
+ cmplti r2, 4
+ bt .L_copy_by_byte
+
+ andi r13, r0, 3
+ movi r19, 4
+ /* Test if dest is not 4 bytes aligned. */
+ bnez r13, .L_dest_not_aligned
+
+/* Hardware can handle unaligned access directly. */
+.L_dest_aligned:
+ /* If dest is aligned, then copy. */
+ zext r18, r2, 31, 4
+
+ /* Test if len less than 16 bytes. */
+ bez r18, .L_len_less_16bytes
+ movi r19, 0
+
+ LABLE_ALIGN
+.L_len_larger_16bytes:
+#if defined(__CK860__)
+ ldw r3, (r1, 0)
+ stw r3, (r0, 0)
+ ldw r3, (r1, 4)
+ stw r3, (r0, 4)
+ ldw r3, (r1, 8)
+ stw r3, (r0, 8)
+ ldw r3, (r1, 12)
+ addi r1, 16
+ stw r3, (r0, 12)
+ addi r0, 16
+#else
+ ldw r20, (r1, 0)
+ ldw r21, (r1, 4)
+ ldw r22, (r1, 8)
+ ldw r23, (r1, 12)
+ stw r20, (r0, 0)
+ stw r21, (r0, 4)
+ stw r22, (r0, 8)
+ stw r23, (r0, 12)
+ PRE_BNEZAD (r18)
+ addi r1, 16
+ addi r0, 16
+#endif
+ BNEZAD (r18, .L_len_larger_16bytes)
+
+.L_len_less_16bytes:
+ zext r18, r2, 3, 2
+ bez r18, .L_copy_by_byte
+.L_len_less_16bytes_loop:
+ ldw r3, (r1, 0)
+ PRE_BNEZAD (r18)
+ addi r1, 4
+ stw r3, (r0, 0)
+ addi r0, 4
+ BNEZAD (r18, .L_len_less_16bytes_loop)
+
+/* Test if len less than 4 bytes. */
+.L_copy_by_byte:
+ zext r18, r2, 1, 0
+ bez r18, .L_return
+.L_copy_by_byte_loop:
+ ldb r3, (r1, 0)
+ PRE_BNEZAD (r18)
+ addi r1, 1
+ stb r3, (r0, 0)
+ addi r0, 1
+ BNEZAD (r18, .L_copy_by_byte_loop)
+
+.L_return:
+ mov r0, r12
+ rts
+
+/*
+ * If dest is not aligned, copy leading bytes one at a time
+ * until dest becomes aligned.
+ */
+.L_dest_not_aligned:
+ sub r13, r19, r13
+ sub r2, r13
+
+/* Align dest, byte by byte. */
+.L_dest_not_aligned_loop:
+ ldb r3, (r1, 0)
+ PRE_BNEZAD (r13)
+ addi r1, 1
+ stb r3, (r0, 0)
+ addi r0, 1
+ BNEZAD (r13, .L_dest_not_aligned_loop)
+ cmplti r2, 4
+ bt .L_copy_by_byte
+
+	/* Dest is now aligned; hardware handles any src misalignment. */
+ jbr .L_dest_aligned
+ENDPROC(__memcpy)
diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S
new file mode 100644
index 000000000..5721e73ad
--- /dev/null
+++ b/arch/csky/abiv2/memmove.S
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ .weak memmove
+ENTRY(__memmove)
+ENTRY(memmove)
+ subu r3, r0, r1
+ cmphs r3, r2
+ bt memcpy
+
+ mov r12, r0
+ addu r0, r0, r2
+ addu r1, r1, r2
+
+ /* Test if len less than 4 bytes. */
+ cmplti r2, 4
+ bt .L_copy_by_byte
+
+ andi r13, r0, 3
+ /* Test if dest is not 4 bytes aligned. */
+ bnez r13, .L_dest_not_aligned
+ /* Hardware can handle unaligned access directly. */
+.L_dest_aligned:
+ /* If dest is aligned, then copy. */
+ zext r18, r2, 31, 4
+ /* Test if len less than 16 bytes. */
+ bez r18, .L_len_less_16bytes
+ movi r19, 0
+
+ /* len > 16 bytes */
+ LABLE_ALIGN
+.L_len_larger_16bytes:
+ subi r1, 16
+ subi r0, 16
+#if defined(__CK860__)
+ ldw r3, (r1, 12)
+ stw r3, (r0, 12)
+ ldw r3, (r1, 8)
+ stw r3, (r0, 8)
+ ldw r3, (r1, 4)
+ stw r3, (r0, 4)
+ ldw r3, (r1, 0)
+ stw r3, (r0, 0)
+#else
+ ldw r20, (r1, 0)
+ ldw r21, (r1, 4)
+ ldw r22, (r1, 8)
+ ldw r23, (r1, 12)
+ stw r20, (r0, 0)
+ stw r21, (r0, 4)
+ stw r22, (r0, 8)
+ stw r23, (r0, 12)
+ PRE_BNEZAD (r18)
+#endif
+ BNEZAD (r18, .L_len_larger_16bytes)
+
+.L_len_less_16bytes:
+ zext r18, r2, 3, 2
+ bez r18, .L_copy_by_byte
+.L_len_less_16bytes_loop:
+ subi r1, 4
+ subi r0, 4
+ ldw r3, (r1, 0)
+ PRE_BNEZAD (r18)
+ stw r3, (r0, 0)
+ BNEZAD (r18, .L_len_less_16bytes_loop)
+
+ /* Test if len less than 4 bytes. */
+.L_copy_by_byte:
+ zext r18, r2, 1, 0
+ bez r18, .L_return
+.L_copy_by_byte_loop:
+ subi r1, 1
+ subi r0, 1
+ ldb r3, (r1, 0)
+ PRE_BNEZAD (r18)
+ stb r3, (r0, 0)
+ BNEZAD (r18, .L_copy_by_byte_loop)
+
+.L_return:
+ mov r0, r12
+ rts
+
+	/* If dest is not aligned, copy bytes one at a time
+	   until it is. */
+.L_dest_not_aligned:
+ sub r2, r13
+.L_dest_not_aligned_loop:
+ subi r1, 1
+ subi r0, 1
+	/* Align dest, byte by byte. */
+ ldb r3, (r1, 0)
+ PRE_BNEZAD (r13)
+ stb r3, (r0, 0)
+ BNEZAD (r13, .L_dest_not_aligned_loop)
+ cmplti r2, 4
+ bt .L_copy_by_byte
+	/* Dest is now aligned; hardware handles any src misalignment. */
+ jbr .L_dest_aligned
+ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/csky/abiv2/memset.S b/arch/csky/abiv2/memset.S
new file mode 100644
index 000000000..a7e7d994b
--- /dev/null
+++ b/arch/csky/abiv2/memset.S
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ .weak memset
+ENTRY(__memset)
+ENTRY(memset)
+	/* Test if len less than 8 bytes. */
+ mov r12, r0
+ cmplti r2, 8
+ bt .L_set_by_byte
+
+ andi r13, r0, 3
+ movi r19, 4
+ /* Test if dest is not 4 bytes aligned. */
+ bnez r13, .L_dest_not_aligned
+ /* Hardware can handle unaligned access directly. */
+.L_dest_aligned:
+ zextb r3, r1
+ lsli r1, 8
+ or r1, r3
+ lsli r3, r1, 16
+ or r3, r1
+
+	/* If dest is aligned, then set word by word. */
+ zext r18, r2, 31, 4
+ /* Test if len less than 16 bytes. */
+ bez r18, .L_len_less_16bytes
+
+ LABLE_ALIGN
+.L_len_larger_16bytes:
+ stw r3, (r0, 0)
+ stw r3, (r0, 4)
+ stw r3, (r0, 8)
+ stw r3, (r0, 12)
+ PRE_BNEZAD (r18)
+ addi r0, 16
+ BNEZAD (r18, .L_len_larger_16bytes)
+
+.L_len_less_16bytes:
+ zext r18, r2, 3, 2
+ andi r2, 3
+ bez r18, .L_set_by_byte
+.L_len_less_16bytes_loop:
+ stw r3, (r0, 0)
+ PRE_BNEZAD (r18)
+ addi r0, 4
+ BNEZAD (r18, .L_len_less_16bytes_loop)
+
+	/* Set the remaining bytes. */
+.L_set_by_byte:
+ zext r18, r2, 2, 0
+ bez r18, .L_return
+.L_set_by_byte_loop:
+ stb r1, (r0, 0)
+ PRE_BNEZAD (r18)
+ addi r0, 1
+ BNEZAD (r18, .L_set_by_byte_loop)
+
+.L_return:
+ mov r0, r12
+ rts
+
+	/* If dest is not aligned, set bytes one at a time
+	   until it is. */
+
+.L_dest_not_aligned:
+ sub r13, r19, r13
+ sub r2, r13
+.L_dest_not_aligned_loop:
+	/* Align dest, byte by byte. */
+ stb r1, (r0, 0)
+ PRE_BNEZAD (r13)
+ addi r0, 1
+ BNEZAD (r13, .L_dest_not_aligned_loop)
+ cmplti r2, 8
+ bt .L_set_by_byte
+	/* Dest is now aligned; go back to the word-set path. */
+ jbr .L_dest_aligned
+ENDPROC(memset)
+ENDPROC(__memset)
diff --git a/arch/csky/abiv2/strcmp.S b/arch/csky/abiv2/strcmp.S
new file mode 100644
index 000000000..f8403f4d8
--- /dev/null
+++ b/arch/csky/abiv2/strcmp.S
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ENTRY(strcmp)
+ mov a3, a0
+	/* Check if s1 and s2 have the same word alignment. */
+ xor a2, a3, a1
+ andi a2, 0x3
+ bnez a2, 7f
+ andi t1, a0, 0x3
+ bnez t1, 5f
+
+1:
+ /* If aligned, load word each time. */
+ ldw t0, (a3, 0)
+ ldw t1, (a1, 0)
+ /* If s1[i] != s2[i], goto 2f. */
+ cmpne t0, t1
+ bt 2f
+ /* If s1[i] == s2[i], check if s1 or s2 is at the end. */
+ tstnbz t0
+ /* If at the end, goto 3f (finish comparing). */
+ bf 3f
+
+ ldw t0, (a3, 4)
+ ldw t1, (a1, 4)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 8)
+ ldw t1, (a1, 8)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 12)
+ ldw t1, (a1, 12)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 16)
+ ldw t1, (a1, 16)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 20)
+ ldw t1, (a1, 20)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 24)
+ ldw t1, (a1, 24)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ ldw t0, (a3, 28)
+ ldw t1, (a1, 28)
+ cmpne t0, t1
+ bt 2f
+ tstnbz t0
+ bf 3f
+
+ addi a3, 32
+ addi a1, 32
+
+ br 1b
+
+# ifdef __CSKYBE__
+	/* s1[i] != s2[i] in word, so we check byte 0. */
+2:
+ xtrb0 a0, t0
+ xtrb0 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 1 */
+ xtrb1 a0, t0
+ xtrb1 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 2 */
+ xtrb2 a0, t0
+ xtrb2 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 3 */
+ xtrb3 a0, t0
+ xtrb3 a2, t1
+ subu a0, a2
+# else
+ /* s1[i] != s2[i] in word, so we check byte 3. */
+2:
+ xtrb3 a0, t0
+ xtrb3 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 2 */
+ xtrb2 a0, t0
+ xtrb2 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 1 */
+ xtrb1 a0, t0
+ xtrb1 a2, t1
+ subu a0, a2
+ bez a2, 4f
+ bnez a0, 4f
+
+ /* check byte 0 */
+ xtrb0 a0, t0
+ xtrb0 a2, t1
+ subu a0, a2
+
+# endif /* !__CSKYBE__ */
+ jmp lr
+3:
+ movi a0, 0
+4:
+ jmp lr
+
+ /* Compare when s1 or s2 is not aligned. */
+5:
+ subi t1, 4
+6:
+ ldb a0, (a3, 0)
+ ldb a2, (a1, 0)
+ subu a0, a2
+ bez a2, 4b
+ bnez a0, 4b
+ addi t1, 1
+ addi a1, 1
+ addi a3, 1
+ bnez t1, 6b
+ br 1b
+
+7:
+ ldb a0, (a3, 0)
+ addi a3, 1
+ ldb a2, (a1, 0)
+ addi a1, 1
+ subu a0, a2
+ bnez a0, 4b
+ bnez a2, 7b
+ jmp r15
+ENDPROC(strcmp)
diff --git a/arch/csky/abiv2/strcpy.S b/arch/csky/abiv2/strcpy.S
new file mode 100644
index 000000000..3c6d3f6a5
--- /dev/null
+++ b/arch/csky/abiv2/strcpy.S
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ENTRY(strcpy)
+ mov a3, a0
+ /* Check if the src addr is aligned. */
+ andi t0, a1, 3
+ bnez t0, 11f
+1:
+ /* Check if all the bytes in the word are not zero. */
+ ldw a2, (a1)
+ tstnbz a2
+ bf 9f
+ stw a2, (a3)
+
+ ldw a2, (a1, 4)
+ tstnbz a2
+ bf 2f
+ stw a2, (a3, 4)
+
+ ldw a2, (a1, 8)
+ tstnbz a2
+ bf 3f
+ stw a2, (a3, 8)
+
+ ldw a2, (a1, 12)
+ tstnbz a2
+ bf 4f
+ stw a2, (a3, 12)
+
+ ldw a2, (a1, 16)
+ tstnbz a2
+ bf 5f
+ stw a2, (a3, 16)
+
+ ldw a2, (a1, 20)
+ tstnbz a2
+ bf 6f
+ stw a2, (a3, 20)
+
+ ldw a2, (a1, 24)
+ tstnbz a2
+ bf 7f
+ stw a2, (a3, 24)
+
+ ldw a2, (a1, 28)
+ tstnbz a2
+ bf 8f
+ stw a2, (a3, 28)
+
+ addi a3, 32
+ addi a1, 32
+ br 1b
+
+
+2:
+ addi a3, 4
+ br 9f
+
+3:
+ addi a3, 8
+ br 9f
+
+4:
+ addi a3, 12
+ br 9f
+
+5:
+ addi a3, 16
+ br 9f
+
+6:
+ addi a3, 20
+ br 9f
+
+7:
+ addi a3, 24
+ br 9f
+
+8:
+ addi a3, 28
+9:
+# ifdef __CSKYBE__
+ xtrb0 t0, a2
+ st.b t0, (a3)
+ bez t0, 10f
+ xtrb1 t0, a2
+ st.b t0, (a3, 1)
+ bez t0, 10f
+ xtrb2 t0, a2
+ st.b t0, (a3, 2)
+ bez t0, 10f
+ stw a2, (a3)
+# else
+ xtrb3 t0, a2
+ st.b t0, (a3)
+ bez t0, 10f
+ xtrb2 t0, a2
+ st.b t0, (a3, 1)
+ bez t0, 10f
+ xtrb1 t0, a2
+ st.b t0, (a3, 2)
+ bez t0, 10f
+ stw a2, (a3)
+# endif /* !__CSKYBE__ */
+10:
+ jmp lr
+
+11:
+ subi t0, 4
+12:
+ ld.b a2, (a1)
+ st.b a2, (a3)
+ bez a2, 10b
+ addi t0, 1
+ addi a1, a1, 1
+ addi a3, a3, 1
+ bnez t0, 12b
+ jbr 1b
+ENDPROC(strcpy)
diff --git a/arch/csky/abiv2/strksyms.c b/arch/csky/abiv2/strksyms.c
new file mode 100644
index 000000000..8d1fd28c6
--- /dev/null
+++ b/arch/csky/abiv2/strksyms.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+#endif
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/csky/abiv2/strlen.S b/arch/csky/abiv2/strlen.S
new file mode 100644
index 000000000..bcdd70764
--- /dev/null
+++ b/arch/csky/abiv2/strlen.S
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include "sysdep.h"
+
+ENTRY(strlen)
+ /* Check if the start addr is aligned. */
+ mov r3, r0
+ andi r1, r0, 3
+ movi r2, 4
+ movi r0, 0
+ bnez r1, .L_start_not_aligned
+
+ LABLE_ALIGN
+.L_start_addr_aligned:
+ /* Check if all the bytes in the word are not zero. */
+ ldw r1, (r3)
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 4)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 8)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 12)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 16)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 20)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 24)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ ldw r1, (r3, 28)
+ addi r0, 4
+ tstnbz r1
+ bf .L_string_tail
+
+ addi r0, 4
+ addi r3, 32
+ br .L_start_addr_aligned
+
+.L_string_tail:
+# ifdef __CSKYBE__
+ xtrb0 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+ xtrb1 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+ xtrb2 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+# else
+ xtrb3 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+ xtrb2 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+ xtrb1 r3, r1
+ bez r3, .L_return
+ addi r0, 1
+# endif /* !__CSKYBE__ */
+
+.L_return:
+ rts
+
+.L_start_not_aligned:
+ sub r2, r2, r1
+.L_start_not_aligned_loop:
+ ldb r1, (r3)
+ PRE_BNEZAD (r2)
+ addi r3, 1
+ bez r1, .L_return
+ addi r0, 1
+ BNEZAD (r2, .L_start_not_aligned_loop)
+ br .L_start_addr_aligned
+ENDPROC(strlen)
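
The word loops above lean on tstnbz, which sets the condition flag when every byte in the word is non-zero. A portable C analogue is the classic carry/borrow trick, shown here with the polarity inverted (nonzero means some byte is zero); the byte-order-specific tail handled by xtrb0-xtrb3 in the assembly is out of scope for this sketch.

#include <assert.h>
#include <stdint.h>

/* Nonzero iff some byte of w is 0x00 -- the software twin of tstnbz
 * (whose flag is the inverse: "all bytes non-zero"). */
static uint32_t has_zero_byte(uint32_t w)
{
	return (w - 0x01010101u) & ~w & 0x80808080u;
}

int main(void)
{
	assert(!has_zero_byte(0x64636261));	/* "abcd" */
	assert(has_zero_byte(0x00636261));	/* "abc\0" */
	assert(has_zero_byte(0x64630061));	/* embedded NUL */
	return 0;
}
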
diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h
new file mode 100644
index 000000000..61abe9201
--- /dev/null
+++ b/arch/csky/abiv2/sysdep.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SYSDEP_H
+#define __SYSDEP_H
+
+#ifdef __ASSEMBLER__
+
+#if defined(__CK860__)
+#define LABLE_ALIGN \
+ .balignw 16, 0x6c03
+
+#define PRE_BNEZAD(R)
+
+#define BNEZAD(R, L) \
+ bnezad R, L
+#else
+#define LABLE_ALIGN \
+ .balignw 8, 0x6c03
+
+#define PRE_BNEZAD(R) \
+ subi R, 1
+
+#define BNEZAD(R, L) \
+ bnez R, L
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __SYSDEP_H */
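
PRE_BNEZAD/BNEZAD let one loop body serve both ISA levels: CK860 has a hardware decrement-and-branch (bnezad decrements the register and branches while it is still non-zero), while older cores need an explicit subi ahead of a plain bnez. Hand-expanding the byte-copy loop from memcpy.S under the two configurations makes the split visible:

/* CK860: PRE_BNEZAD expands to nothing, BNEZAD to bnezad. */
.L_copy_by_byte_loop:
	ldb	r3, (r1, 0)
	addi	r1, 1
	stb	r3, (r0, 0)
	addi	r0, 1
	bnezad	r18, .L_copy_by_byte_loop

/* Older cores: PRE_BNEZAD supplies the subi, BNEZAD is a plain bnez. */
.L_copy_by_byte_loop:
	ldb	r3, (r1, 0)
	subi	r18, 1
	addi	r1, 1
	stb	r3, (r0, 0)
	addi	r0, 1
	bnez	r18, .L_copy_by_byte_loop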