From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- arch/csky/abiv2/Makefile | 14 ++ arch/csky/abiv2/cacheflush.c | 92 +++++++++ arch/csky/abiv2/fpu.c | 270 ++++++++++++++++++++++++++ arch/csky/abiv2/inc/abi/cacheflush.h | 60 ++++++ arch/csky/abiv2/inc/abi/ckmmu.h | 139 ++++++++++++++ arch/csky/abiv2/inc/abi/elf.h | 43 +++++ arch/csky/abiv2/inc/abi/entry.h | 314 +++++++++++++++++++++++++++++++ arch/csky/abiv2/inc/abi/fpu.h | 66 +++++++ arch/csky/abiv2/inc/abi/page.h | 13 ++ arch/csky/abiv2/inc/abi/pgtable-bits.h | 53 ++++++ arch/csky/abiv2/inc/abi/reg_ops.h | 16 ++ arch/csky/abiv2/inc/abi/regdef.h | 31 +++ arch/csky/abiv2/inc/abi/string.h | 27 +++ arch/csky/abiv2/inc/abi/switch_context.h | 31 +++ arch/csky/abiv2/inc/abi/vdso.h | 9 + arch/csky/abiv2/mcount.S | 211 +++++++++++++++++++++ arch/csky/abiv2/memcmp.S | 152 +++++++++++++++ arch/csky/abiv2/memcpy.S | 104 ++++++++++ arch/csky/abiv2/memmove.S | 104 ++++++++++ arch/csky/abiv2/memset.S | 83 ++++++++ arch/csky/abiv2/strcmp.S | 168 +++++++++++++++++ arch/csky/abiv2/strcpy.S | 123 ++++++++++++ arch/csky/abiv2/strksyms.c | 14 ++ arch/csky/abiv2/strlen.S | 97 ++++++++++ arch/csky/abiv2/sysdep.h | 29 +++ 25 files changed, 2263 insertions(+) create mode 100644 arch/csky/abiv2/Makefile create mode 100644 arch/csky/abiv2/cacheflush.c create mode 100644 arch/csky/abiv2/fpu.c create mode 100644 arch/csky/abiv2/inc/abi/cacheflush.h create mode 100644 arch/csky/abiv2/inc/abi/ckmmu.h create mode 100644 arch/csky/abiv2/inc/abi/elf.h create mode 100644 arch/csky/abiv2/inc/abi/entry.h create mode 100644 arch/csky/abiv2/inc/abi/fpu.h create mode 100644 arch/csky/abiv2/inc/abi/page.h create mode 100644 arch/csky/abiv2/inc/abi/pgtable-bits.h create mode 100644 arch/csky/abiv2/inc/abi/reg_ops.h create mode 100644 arch/csky/abiv2/inc/abi/regdef.h create mode 100644 arch/csky/abiv2/inc/abi/string.h create mode 100644 arch/csky/abiv2/inc/abi/switch_context.h create mode 100644 arch/csky/abiv2/inc/abi/vdso.h create mode 100644 arch/csky/abiv2/mcount.S create mode 100644 arch/csky/abiv2/memcmp.S create mode 100644 arch/csky/abiv2/memcpy.S create mode 100644 arch/csky/abiv2/memmove.S create mode 100644 arch/csky/abiv2/memset.S create mode 100644 arch/csky/abiv2/strcmp.S create mode 100644 arch/csky/abiv2/strcpy.S create mode 100644 arch/csky/abiv2/strksyms.c create mode 100644 arch/csky/abiv2/strlen.S create mode 100644 arch/csky/abiv2/sysdep.h diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile new file mode 100644 index 0000000000..ea8005fe01 --- /dev/null +++ b/arch/csky/abiv2/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += cacheflush.o +obj-$(CONFIG_CPU_HAS_FPU) += fpu.o +obj-y += memcmp.o +ifeq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS), y) +obj-y += memcpy.o +obj-y += memmove.o +obj-y += memset.o +endif +obj-y += strcmp.o +obj-y += strcpy.o +obj-y += strlen.o +obj-y += strksyms.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c new file mode 100644 index 0000000000..876028b108 --- /dev/null +++ b/arch/csky/abiv2/cacheflush.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
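/*
 * Summary note, inferred from the code below: D-cache state is tracked
 * per folio with PG_dcache_clean - update_mmu_cache_range() writes a
 * folio back (and invalidates the I-cache for VM_EXEC mappings) only
 * when the bit was still clear.  Cross-hart I-cache invalidation is
 * deferred through mm->context.icache_stale_mask and performed in
 * flush_icache_deferred() on the next switch to that mm.
 */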
+ +#include +#include +#include +#include +#include + +void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long address, pte_t *pte, unsigned int nr) +{ + unsigned long pfn = pte_pfn(*pte); + struct folio *folio; + unsigned int i; + + flush_tlb_page(vma, address); + + if (!pfn_valid(pfn)) + return; + + folio = page_folio(pfn_to_page(pfn)); + + if (test_and_set_bit(PG_dcache_clean, &folio->flags)) + return; + + icache_inv_range(address, address + nr*PAGE_SIZE); + for (i = 0; i < folio_nr_pages(folio); i++) { + unsigned long addr = (unsigned long) kmap_local_folio(folio, + i * PAGE_SIZE); + + dcache_wb_range(addr, addr + PAGE_SIZE); + if (vma->vm_flags & VM_EXEC) + icache_inv_range(addr, addr + PAGE_SIZE); + kunmap_local((void *) addr); + } +} + +void flush_icache_deferred(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + cpumask_t *mask = &mm->context.icache_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + /* + * Ensure the remote hart's writes are visible to this hart. + * This pairs with a barrier in flush_icache_mm. + */ + smp_mb(); + local_icache_inv_all(NULL); + } +} + +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + unsigned int cpu; + cpumask_t others, *mask; + + preempt_disable(); + +#ifdef CONFIG_CPU_HAS_ICACHE_INS + if (mm == current->mm) { + icache_inv_range(start, end); + preempt_enable(); + return; + } +#endif + + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); + + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_icache_inv_all(NULL); + + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. + */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + + if (mm != current->active_mm || !cpumask_empty(&others)) { + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); + cpumask_clear(mask); + } + + preempt_enable(); +} diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c new file mode 100644 index 0000000000..5acc5c2e54 --- /dev/null +++ b/arch/csky/abiv2/fpu.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
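/*
 * Summary note, inferred from the code below: fpu_libc_helper() lets
 * user space access the FPU control registers fcr (cr<1, 2>) and fesr
 * (cr<2, 2>) by decoding the trapping 32-bit mfcr/mtcr at the EPC,
 * replaying it against the saved GPR in pt_regs and stepping the PC
 * past it; fpu_fpe() converts fesr exception bits into SIGFPE/SIGILL
 * si_codes; save_to_user_fp()/restore_from_user_fp() spill and reload
 * the vector/FPU register file in the layout selected by
 * CONFIG_CPU_HAS_FPUV2/CONFIG_CPU_HAS_VDSP.
 */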
+ +#include +#include +#include + +#define MTCR_MASK 0xFC00FFE0 +#define MFCR_MASK 0xFC00FFE0 +#define MTCR_DIST 0xC0006420 +#define MFCR_DIST 0xC0006020 + +/* + * fpu_libc_helper() is to help libc to excute: + * - mfcr %a, cr<1, 2> + * - mfcr %a, cr<2, 2> + * - mtcr %a, cr<1, 2> + * - mtcr %a, cr<2, 2> + */ +int fpu_libc_helper(struct pt_regs *regs) +{ + int fault; + unsigned long instrptr, regx = 0; + unsigned long index = 0, tmp = 0; + unsigned long tinstr = 0; + u16 instr_hi, instr_low; + + instrptr = instruction_pointer(regs); + if (instrptr & 1) + return 0; + + fault = __get_user(instr_low, (u16 *)instrptr); + if (fault) + return 0; + + fault = __get_user(instr_hi, (u16 *)(instrptr + 2)); + if (fault) + return 0; + + tinstr = instr_hi | ((unsigned long)instr_low << 16); + + if (((tinstr >> 21) & 0x1F) != 2) + return 0; + + if ((tinstr & MTCR_MASK) == MTCR_DIST) { + index = (tinstr >> 16) & 0x1F; + if (index > 13) + return 0; + + tmp = tinstr & 0x1F; + if (tmp > 2) + return 0; + + regx = *(®s->a0 + index); + + if (tmp == 1) + mtcr("cr<1, 2>", regx); + else if (tmp == 2) + mtcr("cr<2, 2>", regx); + else + return 0; + + regs->pc += 4; + return 1; + } + + if ((tinstr & MFCR_MASK) == MFCR_DIST) { + index = tinstr & 0x1F; + if (index > 13) + return 0; + + tmp = ((tinstr >> 16) & 0x1F); + if (tmp > 2) + return 0; + + if (tmp == 1) + regx = mfcr("cr<1, 2>"); + else if (tmp == 2) + regx = mfcr("cr<2, 2>"); + else + return 0; + + *(®s->a0 + index) = regx; + + regs->pc += 4; + return 1; + } + + return 0; +} + +void fpu_fpe(struct pt_regs *regs) +{ + int sig, code; + unsigned int fesr; + + fesr = mfcr("cr<2, 2>"); + + sig = SIGFPE; + code = FPE_FLTUNK; + + if (fesr & FPE_ILLE) { + sig = SIGILL; + code = ILL_ILLOPC; + } else if (fesr & FPE_IDC) { + sig = SIGILL; + code = ILL_ILLOPN; + } else if (fesr & FPE_FEC) { + sig = SIGFPE; + if (fesr & FPE_IOC) + code = FPE_FLTINV; + else if (fesr & FPE_DZC) + code = FPE_FLTDIV; + else if (fesr & FPE_UFC) + code = FPE_FLTUND; + else if (fesr & FPE_OFC) + code = FPE_FLTOVF; + else if (fesr & FPE_IXC) + code = FPE_FLTRES; + } + + force_sig_fault(sig, code, (void __user *)regs->pc); +} + +#define FMFVR_FPU_REGS(vrx, vry) \ + "fmfvrl %0, "#vrx"\n" \ + "fmfvrh %1, "#vrx"\n" \ + "fmfvrl %2, "#vry"\n" \ + "fmfvrh %3, "#vry"\n" + +#define FMTVR_FPU_REGS(vrx, vry) \ + "fmtvrl "#vrx", %0\n" \ + "fmtvrh "#vrx", %1\n" \ + "fmtvrl "#vry", %2\n" \ + "fmtvrh "#vry", %3\n" + +#define STW_FPU_REGS(a, b, c, d) \ + "stw %0, (%4, "#a")\n" \ + "stw %1, (%4, "#b")\n" \ + "stw %2, (%4, "#c")\n" \ + "stw %3, (%4, "#d")\n" + +#define LDW_FPU_REGS(a, b, c, d) \ + "ldw %0, (%4, "#a")\n" \ + "ldw %1, (%4, "#b")\n" \ + "ldw %2, (%4, "#c")\n" \ + "ldw %3, (%4, "#d")\n" + +void save_to_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = mfcr("cr<1, 2>"); + tmp2 = mfcr("cr<2, 2>"); + + user_fp->fcr = tmp1; + user_fp->fesr = tmp2; + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vstmu.32 vr0-vr3, (%0)\n" + "vstmu.32 vr4-vr7, (%0)\n" + "vstmu.32 vr8-vr11, (%0)\n" + "vstmu.32 vr12-vr15, (%0)\n" + "fstmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fstmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + FMFVR_FPU_REGS(vr0, vr1) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr2, vr3) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr4, vr5) + 
STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr6, vr7) + STW_FPU_REGS(96, 100, 112, 116) + "addi %4, 128\n" + FMFVR_FPU_REGS(vr8, vr9) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr10, vr11) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr12, vr13) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr14, vr15) + STW_FPU_REGS(96, 100, 112, 116) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + + local_irq_restore(flg); +} + +void restore_from_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = user_fp->fcr; + tmp2 = user_fp->fesr; + + mtcr("cr<1, 2>", tmp1); + mtcr("cr<2, 2>", tmp2); + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vldmu.32 vr0-vr3, (%0)\n" + "vldmu.32 vr4-vr7, (%0)\n" + "vldmu.32 vr8-vr11, (%0)\n" + "vldmu.32 vr12-vr15, (%0)\n" + "fldmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fldmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr0, vr1) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr2, vr3) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr4, vr5) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr6, vr7) + "addi %4, 128\n" + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr8, vr9) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr10, vr11) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr12, vr13) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr14, vr15) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + local_irq_restore(flg); +} diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h new file mode 100644 index 0000000000..40be169072 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +/* Keep includes the same across arches. 
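(On the protocol used by this header, inferred from the code:
flush_dcache_folio() clears PG_dcache_clean to mark a folio as needing
maintenance, and update_mmu_cache_range() in cacheflush.c later writes
it back and re-sets the bit, which is why most flush_cache_* hooks
below can stay no-ops.)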
*/ +#include + +/* + * The cache doesn't need to be flushed when TLB entries change when + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) + +#define PG_dcache_clean PG_arch_1 + +static inline void flush_dcache_folio(struct folio *folio) +{ + if (test_bit(PG_dcache_clean, &folio->flags)) + clear_bit(PG_dcache_clean, &folio->flags); +} +#define flush_dcache_folio flush_dcache_folio + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +static inline void flush_dcache_page(struct page *page) +{ + flush_dcache_folio(page_folio(page)); +} + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#define flush_icache_range(start, end) cache_wbinv_range(start, end) + +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +void flush_icache_deferred(struct mm_struct *mm); + +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ + if (vma->vm_flags & VM_EXEC) { \ + dcache_wb_range((unsigned long)dst, \ + (unsigned long)dst + len); \ + flush_icache_mm_range(current->mm, \ + (unsigned long)dst, \ + (unsigned long)dst + len); \ + } \ +} while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h new file mode 100644 index 0000000000..64215f2380 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/ckmmu.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CKMMUV2_H +#define __ASM_CSKY_CKMMUV2_H + +#include +#include + +static inline int read_mmu_index(void) +{ + return mfcr("cr<0, 15>"); +} + +static inline void write_mmu_index(int value) +{ + mtcr("cr<0, 15>", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return mfcr("cr<2, 15>"); +} + +static inline int read_mmu_entrylo1(void) +{ + return mfcr("cr<3, 15>"); +} + +static inline void write_mmu_pagemask(int value) +{ + mtcr("cr<6, 15>", value); +} + +static inline int read_mmu_entryhi(void) +{ + return mfcr("cr<4, 15>"); +} + +static inline void write_mmu_entryhi(int value) +{ + mtcr("cr<4, 15>", value); +} + +static inline unsigned long read_mmu_msa0(void) +{ + return mfcr("cr<30, 15>"); +} + +static inline void write_mmu_msa0(unsigned long value) +{ + mtcr("cr<30, 15>", value); +} + +static inline unsigned long read_mmu_msa1(void) +{ + return mfcr("cr<31, 15>"); +} + +static inline void write_mmu_msa1(unsigned long value) +{ + mtcr("cr<31, 15>", value); +} + +/* + * TLB operations. 
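 * A reading of the constants below: each helper pokes MCIR (cr<8, 15>)
 * with one command bit - 0x80000000 probe, 0x40000000 read, 0x04000000
 * invalidate-all, 0x02000000 invalidate-indexed.  With
 * CONFIG_CPU_HAS_TLBI the tlbi.alls/tlbi.all instructions are used
 * instead, bracketed by sync_is() and sync.i.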
+ */ +static inline void tlb_probe(void) +{ + mtcr("cr<8, 15>", 0x80000000); +} + +static inline void tlb_read(void) +{ + mtcr("cr<8, 15>", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.alls \n" + "sync.i \n" + : + : + : "memory"); +#else + mtcr("cr<8, 15>", 0x04000000); +#endif +} + +static inline void local_tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); + asm volatile( + "tlbi.all \n" + "sync.i \n" + : + : + : "memory"); +#else + tlb_invalid_all(); +#endif +} + +static inline void tlb_invalid_indexed(void) +{ + mtcr("cr<8, 15>", 0x02000000); +} + +#define NOP32 ".long 0x4820c400\n" + +static inline void setup_pgd(pgd_t *pgd, int asid) +{ +#ifdef CONFIG_CPU_HAS_TLBI + sync_is(); +#else + mb(); +#endif + asm volatile( +#ifdef CONFIG_CPU_HAS_TLBI + "mtcr %1, cr<28, 15> \n" +#endif + "mtcr %1, cr<29, 15> \n" + "mtcr %0, cr< 4, 15> \n" + ".rept 64 \n" + NOP32 + ".endr \n" + : + :"r"(asid), "r"(__pa(pgd) | BIT(0)) + :"memory"); +} + +static inline pgd_t *get_pgd(void) +{ + return __va(mfcr("cr<29, 15>") & ~BIT(0)); +} +#endif /* __ASM_CSKY_CKMMUV2_H */ diff --git a/arch/csky/abiv2/inc/abi/elf.h b/arch/csky/abiv2/inc/abi/elf.h new file mode 100644 index 0000000000..290f49ef4c --- /dev/null +++ b/arch/csky/abiv2/inc/abi/elf.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +/* The member sort in array pr_reg[x] is defined by GDB. */ +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->a1; \ + pr_reg[2] = regs->a0; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a2; \ + pr_reg[5] = regs->a3; \ + pr_reg[6] = regs->regs[0]; \ + pr_reg[7] = regs->regs[1]; \ + pr_reg[8] = regs->regs[2]; \ + pr_reg[9] = regs->regs[3]; \ + pr_reg[10] = regs->regs[4]; \ + pr_reg[11] = regs->regs[5]; \ + pr_reg[12] = regs->regs[6]; \ + pr_reg[13] = regs->regs[7]; \ + pr_reg[14] = regs->regs[8]; \ + pr_reg[15] = regs->regs[9]; \ + pr_reg[16] = regs->usp; \ + pr_reg[17] = regs->lr; \ + pr_reg[18] = regs->exregs[0]; \ + pr_reg[19] = regs->exregs[1]; \ + pr_reg[20] = regs->exregs[2]; \ + pr_reg[21] = regs->exregs[3]; \ + pr_reg[22] = regs->exregs[4]; \ + pr_reg[23] = regs->exregs[5]; \ + pr_reg[24] = regs->exregs[6]; \ + pr_reg[25] = regs->exregs[7]; \ + pr_reg[26] = regs->exregs[8]; \ + pr_reg[27] = regs->exregs[9]; \ + pr_reg[28] = regs->exregs[10]; \ + pr_reg[29] = regs->exregs[11]; \ + pr_reg[30] = regs->exregs[12]; \ + pr_reg[31] = regs->exregs[13]; \ + pr_reg[32] = regs->exregs[14]; \ + pr_reg[33] = regs->tls; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h new file mode 100644 index 0000000000..cca63e699b --- /dev/null +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include +#include + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define KSPTOUSP +#define USPTOKSP + +#define usp cr<14, 1> + +.macro SAVE_ALL epc_inc + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + RD_MEH lr + WR_MEH lr + + mfcr lr, epc + movi tls, \epc_inc + add lr, tls + stw lr, (sp, 8) + + mfcr lr, epsr + stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: + mfcr lr, usp +2: + stw lr, (sp, 16) + + stw a0, (sp, 
20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_ALL + ldw tls, (sp, 0) + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + ldw a0, (sp, 16) + mtcr a0, usp + mtcr a0, ss0 + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 + bf 1f + mfcr sp, ss0 +1: + rte +.endm + +.macro SAVE_REGS_FTRACE + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + mfcr lr, psr + stw lr, (sp, 12) + + addi lr, sp, 152 + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_REGS_FTRACE + ldw tls, (sp, 0) + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 64 + stm r4-r11, (sp) + stw lr, (sp, 32) + stw r16, (sp, 36) + stw r17, (sp, 40) + stw r26, (sp, 44) + stw r27, (sp, 48) + stw r28, (sp, 52) + stw r29, (sp, 56) + stw r30, (sp, 60) +#ifdef CONFIG_CPU_HAS_HILO + subi sp, 16 + mfhi lr + stw lr, (sp, 0) + mflo lr + stw lr, (sp, 4) + mfcr lr, cr14 + stw lr, (sp, 8) +#endif +.endm + +.macro RESTORE_SWITCH_STACK +#ifdef CONFIG_CPU_HAS_HILO + ldw lr, (sp, 0) + mthi lr + ldw lr, (sp, 4) + mtlo lr + ldw lr, (sp, 8) + mtcr lr, cr14 + addi sp, 16 +#endif + ldm r4-r11, (sp) + ldw lr, (sp, 32) + ldw r16, (sp, 36) + ldw r17, (sp, 40) + ldw r26, (sp, 44) + ldw r27, (sp, 48) + ldw r28, (sp, 52) + ldw r29, (sp, 56) + ldw r30, (sp, 60) + addi sp, 64 +.endm + +/* MMU registers operators. 
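The mapping, taken from the mtcr/mfcr operands below: MIR is
cr<0, 15>, MEH cr<4, 15>, MCIR cr<8, 15>, user PGDR cr<29, 15> and
kernel PGDR cr<28, 15>.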
*/ +.macro RD_MIR rx + mfcr \rx, cr<0, 15> +.endm + +.macro RD_MEH rx + mfcr \rx, cr<4, 15> +.endm + +.macro RD_MCIR rx + mfcr \rx, cr<8, 15> +.endm + +.macro RD_PGDR rx + mfcr \rx, cr<29, 15> +.endm + +.macro RD_PGDR_K rx + mfcr \rx, cr<28, 15> +.endm + +.macro WR_MEH rx + mtcr \rx, cr<4, 15> +.endm + +.macro WR_MCIR rx + mtcr \rx, cr<8, 15> +.endm + +#ifdef CONFIG_PAGE_OFFSET_80000000 +#define MSA_SET cr<30, 15> +#define MSA_CLR cr<31, 15> +#endif + +#ifdef CONFIG_PAGE_OFFSET_A0000000 +#define MSA_SET cr<31, 15> +#define MSA_CLR cr<30, 15> +#endif + +.macro SETUP_MMU + /* Init psr and enable ee */ + lrw r6, DEFAULT_PSR_VALUE + mtcr r6, psr + psrset ee + + /* Invalid I/Dcache BTB BHT */ + movi r6, 7 + lsli r6, 16 + addi r6, (1<<4) | 3 + mtcr r6, cr17 + + /* Invalid all TLB */ + bgeni r6, 26 + mtcr r6, cr<8, 15> /* Set MCIR */ + + /* Check MMU on/off */ + mfcr r6, cr18 + btsti r6, 0 + bt 1f + + /* MMU off: setup mapping tlb entry */ + movi r6, 0 + mtcr r6, cr<6, 15> /* Set MPR with 4K page size */ + + grs r6, 1f /* Get current pa by PC */ + bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */ + andn r6, r7 + mtcr r6, cr<4, 15> /* Set MEH */ + + mov r8, r6 + movi r7, 0x00000006 + or r8, r7 + mtcr r8, cr<2, 15> /* Set MEL0 */ + movi r7, 0x00001006 + or r8, r7 + mtcr r8, cr<3, 15> /* Set MEL1 */ + + bgeni r8, 28 + mtcr r8, cr<8, 15> /* Set MCIR to write TLB */ + + br 2f +1: + /* + * MMU on: use origin MSA value from bootloader + * + * cr<30/31, 15> MSA register format: + * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 + * BA Reserved SH WA B SO SEC C D V + */ + mfcr r6, MSA_SET /* Get MSA */ +2: + lsri r6, 29 + lsli r6, 29 + addi r6, 0x1ce + mtcr r6, MSA_SET /* Set MSA */ + + movi r6, 0 + mtcr r6, MSA_CLR /* Clr MSA */ + + /* enable MMU */ + mfcr r6, cr18 + bseti r6, 0 + mtcr r6, cr18 + + jmpi 3f /* jump to va */ +3: +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h new file mode 100644 index 0000000000..aabb793550 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_FPU_H +#define __ASM_CSKY_FPU_H + +#include +#include + +int fpu_libc_helper(struct pt_regs *regs); +void fpu_fpe(struct pt_regs *regs); + +static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } + +void save_to_user_fp(struct user_fp *user_fp); +void restore_from_user_fp(struct user_fp *user_fp); + +/* + * Define the fesr bit for fpe handle. 
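 * How fpu_fpe() (fpu.c) maps these: FPE_ILLE -> SIGILL/ILL_ILLOPC,
 * FPE_IDC -> SIGILL/ILL_ILLOPN; when FPE_FEC is set, IOC/DZC/UFC/OFC/IXC
 * select FPE_FLTINV/FLTDIV/FLTUND/FLTOVF/FLTRES under SIGFPE.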
+ */ +#define FPE_ILLE (1 << 16) /* Illegal instruction */ +#define FPE_FEC (1 << 7) /* Input float-point arithmetic exception */ +#define FPE_IDC (1 << 5) /* Input denormalized exception */ +#define FPE_IXC (1 << 4) /* Inexact exception */ +#define FPE_UFC (1 << 3) /* Underflow exception */ +#define FPE_OFC (1 << 2) /* Overflow exception */ +#define FPE_DZC (1 << 1) /* Divide by zero exception */ +#define FPE_IOC (1 << 0) /* Invalid operation exception */ +#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC) + +#ifdef CONFIG_OPEN_FPU_IDE +#define IDE_STAT (1 << 5) +#else +#define IDE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IXE +#define IXE_STAT (1 << 4) +#else +#define IXE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_UFE +#define UFE_STAT (1 << 3) +#else +#define UFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_OFE +#define OFE_STAT (1 << 2) +#else +#define OFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_DZE +#define DZE_STAT (1 << 1) +#else +#define DZE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IOE +#define IOE_STAT (1 << 0) +#else +#define IOE_STAT 0 +#endif + +#endif /* __ASM_CSKY_FPU_H */ diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h new file mode 100644 index 0000000000..cf005f13cd --- /dev/null +++ b/arch/csky/abiv2/inc/abi/page.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); +} diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h new file mode 100644 index 0000000000..526152bd21 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<7) +#define _PAGE_READ (1<<8) +#define _PAGE_WRITE (1<<9) +#define _PAGE_PRESENT (1<<10) +#define _PAGE_MODIFIED (1<<11) + +/* We borrow bit 7 to store the exclusive marker in swap PTEs. */ +#define _PAGE_SWP_EXCLUSIVE (1<<7) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<0) +#define _PAGE_VALID (1<<1) +#define _PAGE_DIRTY (1<<2) + +#define _PAGE_SO (1<<5) +#define _PAGE_BUF (1<<6) +#define _PAGE_CACHE (1<<3) +#define _CACHE_MASK _PAGE_CACHE + +#define _CACHE_CACHED (_PAGE_CACHE | _PAGE_BUF) +#define _CACHE_UNCACHED (0) + +#define _PAGE_PROT_NONE _PAGE_WRITE + +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). 
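 * The packing below keeps _PAGE_GLOBAL, _PAGE_VALID, _PAGE_WRITE and
 * _PAGE_PRESENT zero so a swap PTE is never mistaken for a live one.
 * As a worked example, type 5 with offset 3 encodes to 0x914, i.e.
 * (5 << 2) | (1 << 8) | (1 << 11); a C round-trip of this packing
 * appears after the end of the patch.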
+ * + * Format of swap PTE: + * bit 0: _PAGE_GLOBAL (zero) + * bit 1: _PAGE_VALID (zero) + * bit 2 - 6: swap type + * bit 7: exclusive marker + * bit 8: swap offset[0] + * bit 9: _PAGE_WRITE (zero) + * bit 10: _PAGE_PRESENT (zero) + * bit 11 - 31: swap offset[1 - 21] + */ +#define __swp_type(x) (((x).val >> 2) & 0x1f) +#define __swp_offset(x) ((((x).val >> 8) & 0x1) | \ + (((x).val >> 10) & 0x3ffffe)) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type & 0x1f) << 2) | \ + ((offset & 0x1) << 8) | \ + ((offset & 0x3ffffe) << 10)}) + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h new file mode 100644 index 0000000000..49ba18a647 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/reg_ops.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr31"); +} + +static inline unsigned int mfcr_ccr2(void) +{ + return mfcr("cr23"); +} +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h new file mode 100644 index 0000000000..0933addbc2 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#ifdef __ASSEMBLY__ +#define syscallid r7 +#else +#define syscallid "r7" +#endif + +#define regs_syscallid(regs) regs->regs[3] +#define regs_fp(regs) regs->regs[4] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 | + * S VEC TM MM + * + * S: Super Mode + * VEC: Exception Number + * TM: Trace Mode + * MM: Memory unaligned addr access + */ +#define DEFAULT_PSR_VALUE 0x80000200 + +#define SYSTRACE_SAVENUM 5 + +#define TRAP0_SIZE 4 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv2/inc/abi/string.h b/arch/csky/abiv2/inc/abi/string.h new file mode 100644 index 0000000000..f01bad2ac4 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/string.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *, const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *, const char *); + +#define __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h new file mode 100644 index 0000000000..5dd5c3f4ee --- /dev/null +++ b/arch/csky/abiv2/inc/abi/switch_context.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { +#ifdef CONFIG_CPU_HAS_HILO + unsigned long rhi; + unsigned long rlo; + unsigned long cr14; + unsigned long pad; +#endif + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r15; + unsigned long r16; + unsigned long r17; + unsigned long r26; + unsigned long r27; + unsigned long 
r28; + unsigned long r29; + unsigned long r30; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h new file mode 100644 index 0000000000..40fd10d893 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/vdso.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H + +/* movi r7, 173 */ +#define SET_SYSCALL_ID .long 0x008bea07 + +#endif /* __ABI_CSKY_VDSO_H */ diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S new file mode 100644 index 0000000000..d745e10c10 --- /dev/null +++ b/arch/csky/abiv2/mcount.S @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include +#include +#include + +/* + * csky-gcc with -pg will put the following asm after prologue: + * push r15 + * jsri _mcount + * + * stack layout after mcount_enter in _mcount(): + * + * current sp => 0:+-------+ + * | a0-a3 | -> must save all argument regs + * +16:+-------+ + * | lr | -> _mcount lr (instrumente function's pc) + * +20:+-------+ + * | fp=r8 | -> instrumented function fp + * +24:+-------+ + * | plr | -> instrumented function lr (parent's pc) + * +-------+ + */ + +.macro mcount_enter + subi sp, 24 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) + stw lr, (sp, 16) + stw r8, (sp, 20) +.endm + +.macro mcount_exit + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + ldw t1, (sp, 16) + ldw r8, (sp, 20) + ldw lr, (sp, 24) + addi sp, 28 + jmp t1 +.endm + +.macro mcount_enter_regs + subi sp, 8 + stw lr, (sp, 0) + stw r8, (sp, 4) + SAVE_REGS_FTRACE +.endm + +.macro mcount_exit_regs + RESTORE_REGS_FTRACE + subi sp, 152 + ldw t1, (sp, 4) + addi sp, 152 + ldw r8, (sp, 4) + ldw lr, (sp, 8) + addi sp, 12 + jmp t1 +.endm + +.macro save_return_regs + subi sp, 16 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) +.endm + +.macro restore_return_regs + mov lr, a0 + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + addi sp, 16 +.endm + +.macro nop32_stub + nop32 + nop32 + nop32 +.endm + +ENTRY(ftrace_stub) + jmp lr +END(ftrace_stub) + +#ifndef CONFIG_DYNAMIC_FTRACE +ENTRY(_mcount) + mcount_enter + + /* r26 is link register, only used with jsri translation */ + lrw r26, ftrace_trace_function + ldw r26, (r26, 0) + lrw a1, ftrace_stub + cmpne r26, a1 + bf skip_ftrace + + mov a0, lr + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + jsr r26 + +#ifndef CONFIG_FUNCTION_GRAPH_TRACER +skip_ftrace: + mcount_exit +#else +skip_ftrace: + lrw a0, ftrace_graph_return + ldw a0, (a0, 0) + lrw a1, ftrace_stub + cmpne a0, a1 + bt ftrace_graph_caller + + lrw a0, ftrace_graph_entry + ldw a0, (a0, 0) + lrw a1, ftrace_graph_entry_stub + cmpne a0, a1 + bt ftrace_graph_caller + + mcount_exit +#endif +END(_mcount) +#else /* CONFIG_DYNAMIC_FTRACE */ +ENTRY(_mcount) + mov t1, lr + ldw lr, (sp, 0) + addi sp, 4 + jmp t1 +ENDPROC(_mcount) + +ENTRY(ftrace_caller) + mcount_enter + + ldw a0, (sp, 16) + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + nop +GLOBAL(ftrace_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_call) + nop32_stub +#endif + + mcount_exit +ENDPROC(ftrace_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov a0, sp + addi a0, 24 + ldw a1, (sp, 16) + subi a1, 4 + mov a2, r8 + lrw r26, prepare_ftrace_return 
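	/* Arguments set up above: a0 = address of the parent-lr slot
	 * (sp + 24), a1 = callsite pc - 4, a2 = frame pointer (r8) -
	 * assuming the conventional prepare_ftrace_return(parent,
	 * self_addr, frame_pointer) argument order. */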
+ jsr r26 + mcount_exit +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save_return_regs + mov a0, r8 + jsri ftrace_return_to_handler + restore_return_regs + jmp lr +END(return_to_handler) +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +ENTRY(ftrace_regs_caller) + mcount_enter_regs + + lrw t1, PT_FRAME_SIZE + add t1, sp + + ldw a0, (t1, 0) + subi a0, 4 + ldw a1, (t1, 8) + lrw a2, function_trace_op + ldw a2, (a2, 0) + mov a3, sp + + nop +GLOBAL(ftrace_regs_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_regs_call) + nop32_stub +#endif + + mcount_exit_regs +ENDPROC(ftrace_regs_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/csky/abiv2/memcmp.S b/arch/csky/abiv2/memcmp.S new file mode 100644 index 0000000000..bf0d809f09 --- /dev/null +++ b/arch/csky/abiv2/memcmp.S @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(memcmp) + /* Test if len less than 4 bytes. */ + mov r3, r0 + movi r0, 0 + mov r12, r4 + cmplti r2, 4 + bt .L_compare_by_byte + + andi r13, r0, 3 + movi r19, 4 + + /* Test if s1 is not 4 bytes aligned. */ + bnez r13, .L_s1_not_aligned + + LABLE_ALIGN +.L_s1_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_compare_by_word + +.L_compare_by_4word: + /* If aligned, load word each time. */ + ldw r20, (r3, 0) + ldw r21, (r1, 0) + /* If s1[i] != s2[i], goto .L_byte_check. */ + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 4) + ldw r21, (r1, 4) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 8) + ldw r21, (r1, 8) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 12) + ldw r21, (r1, 12) + cmpne r20, r21 + bt .L_byte_check + + PRE_BNEZAD (r18) + addi a3, 16 + addi a1, 16 + + BNEZAD (r18, .L_compare_by_4word) + +.L_compare_by_word: + zext r18, r2, 3, 2 + bez r18, .L_compare_by_byte +.L_compare_by_word_loop: + ldw r20, (r3, 0) + ldw r21, (r1, 0) + addi r3, 4 + PRE_BNEZAD (r18) + cmpne r20, r21 + addi r1, 4 + bt .L_byte_check + BNEZAD (r18, .L_compare_by_word_loop) + +.L_compare_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_compare_by_byte_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r18) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r18, .L_compare_by_byte_loop) + +.L_return: + mov r4, r12 + rts + +# ifdef __CSKYBE__ +/* d[i] != s[i] in word, so we check byte 0. */ +.L_byte_check: + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 3 */ + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 +# else +/* s1[i] != s2[i] in word, so we check byte 3. */ +.L_byte_check: + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 0 */ + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + br .L_return +# endif /* !__CSKYBE__ */ + +/* Compare when s1 is not aligned. 
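Head-alignment strategy, read from the code below: byte-compare the
r13 bytes that bring s1 up to a word boundary, then rejoin the aligned
word loop; any remaining misalignment of s2 is left to the hardware's
unaligned loads, as in memcpy.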
*/ +.L_s1_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_s1_not_aligned_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r13) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r13, .L_s1_not_aligned_loop) + br .L_s1_aligned +ENDPROC(memcmp) diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S new file mode 100644 index 0000000000..145bf3a936 --- /dev/null +++ b/arch/csky/abiv2/memcpy.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(__memcpy) +ENTRY(memcpy) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + +/* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + movi r19, 0 + + LABLE_ALIGN +.L_len_larger_16bytes: +#if defined(__CK860__) + ldw r3, (r1, 0) + stw r3, (r0, 0) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 12) + addi r1, 16 + stw r3, (r0, 12) + addi r0, 16 +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) + addi r1, 16 + addi r0, 16 +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 4 + stw r3, (r0, 0) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + +/* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + +/* + * If dest is not aligned, just copying some bytes makes the + * dest align. + */ +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 + +/* Makes the dest align. */ +.L_dest_not_aligned_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(__memcpy) diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S new file mode 100644 index 0000000000..5721e73ad3 --- /dev/null +++ b/arch/csky/abiv2/memmove.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + + .weak memmove +ENTRY(__memmove) +ENTRY(memmove) + subu r3, r0, r1 + cmphs r3, r2 + bt memcpy + + mov r12, r0 + addu r0, r0, r2 + addu r1, r1, r2 + + /* Test if len less than 4 bytes. */ + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. 
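(Context from the entry code above: when dest - src >= len a forward
copy cannot clobber unread source bytes, so memmove branched straight
to memcpy; otherwise both pointers were advanced past the end and the
loops below copy backward, decrementing before each block.)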
*/ + bez r18, .L_len_less_16bytes + movi r19, 0 + + /* len > 16 bytes */ + LABLE_ALIGN +.L_len_larger_16bytes: + subi r1, 16 + subi r0, 16 +#if defined(__CK860__) + ldw r3, (r1, 12) + stw r3, (r0, 12) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 0) + stw r3, (r0, 0) +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + subi r1, 4 + subi r0, 4 + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + stw r3, (r0, 0) + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + subi r1, 1 + subi r0, 1 + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + stb r3, (r0, 0) + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just copy some bytes makes the dest + align. */ +.L_dest_not_aligned: + sub r2, r13 +.L_dest_not_aligned_loop: + subi r1, 1 + subi r0, 1 + /* Makes the dest align. */ + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + stb r3, (r0, 0) + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/csky/abiv2/memset.S b/arch/csky/abiv2/memset.S new file mode 100644 index 0000000000..a7e7d994b6 --- /dev/null +++ b/arch/csky/abiv2/memset.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + + .weak memset +ENTRY(__memset) +ENTRY(memset) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 8 + bt .L_set_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + zextb r3, r1 + lsli r1, 8 + or r1, r3 + lsli r3, r1, 16 + or r3, r1 + + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + + LABLE_ALIGN +.L_len_larger_16bytes: + stw r3, (r0, 0) + stw r3, (r0, 4) + stw r3, (r0, 8) + stw r3, (r0, 12) + PRE_BNEZAD (r18) + addi r0, 16 + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + andi r2, 3 + bez r18, .L_set_by_byte +.L_len_less_16bytes_loop: + stw r3, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_set_by_byte: + zext r18, r2, 2, 0 + bez r18, .L_return +.L_set_by_byte_loop: + stb r1, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 1 + BNEZAD (r18, .L_set_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just set some bytes makes the dest + align. */ + +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_dest_not_aligned_loop: + /* Makes the dest align. */ + stb r1, (r0, 0) + PRE_BNEZAD (r13) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 8 + bt .L_set_by_byte + /* Check whether the src is aligned. 
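(The wording above is apparently inherited from memcpy - memset has no
source operand.  The branch below simply re-enters the aligned path
once the head bytes have aligned dest; there the fill byte in r1 is
replicated across all four byte lanes by the zextb/lsli/or sequence
before the word stores.)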
*/ + jbr .L_dest_aligned +ENDPROC(memset) +ENDPROC(__memset) diff --git a/arch/csky/abiv2/strcmp.S b/arch/csky/abiv2/strcmp.S new file mode 100644 index 0000000000..f8403f4d8c --- /dev/null +++ b/arch/csky/abiv2/strcmp.S @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strcmp) + mov a3, a0 + /* Check if the s1 addr is aligned. */ + xor a2, a3, a1 + andi a2, 0x3 + bnez a2, 7f + andi t1, a0, 0x3 + bnez t1, 5f + +1: + /* If aligned, load word each time. */ + ldw t0, (a3, 0) + ldw t1, (a1, 0) + /* If s1[i] != s2[i], goto 2f. */ + cmpne t0, t1 + bt 2f + /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ + tstnbz t0 + /* If at the end, goto 3f (finish comparing). */ + bf 3f + + ldw t0, (a3, 4) + ldw t1, (a1, 4) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 8) + ldw t1, (a1, 8) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 12) + ldw t1, (a1, 12) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 16) + ldw t1, (a1, 16) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 20) + ldw t1, (a1, 20) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 24) + ldw t1, (a1, 24) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 28) + ldw t1, (a1, 28) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + addi a3, 32 + addi a1, 32 + + br 1b + +# ifdef __CSKYBE__ + /* d[i] != s[i] in word, so we check byte 0. */ +2: + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 3 */ + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 +# else + /* s1[i] != s2[i] in word, so we check byte 3. */ +2: + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 0 */ + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + +# endif /* !__CSKYBE__ */ + jmp lr +3: + movi a0, 0 +4: + jmp lr + + /* Compare when s1 or s2 is not aligned. */ +5: + subi t1, 4 +6: + ldb a0, (a3, 0) + ldb a2, (a1, 0) + subu a0, a2 + bez a2, 4b + bnez a0, 4b + addi t1, 1 + addi a1, 1 + addi a3, 1 + bnez t1, 6b + br 1b + +7: + ldb a0, (a3, 0) + addi a3, 1 + ldb a2, (a1, 0) + addi a1, 1 + subu a0, a2 + bnez a0, 4b + bnez a2, 7b + jmp r15 +ENDPROC(strcmp) diff --git a/arch/csky/abiv2/strcpy.S b/arch/csky/abiv2/strcpy.S new file mode 100644 index 0000000000..3c6d3f6a57 --- /dev/null +++ b/arch/csky/abiv2/strcpy.S @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strcpy) + mov a3, a0 + /* Check if the src addr is aligned. */ + andi t0, a1, 3 + bnez t0, 11f +1: + /* Check if all the bytes in the word are not zero. 
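tstnbz is true only while the word holds no NUL byte, so each
iteration below stores a full word; the numbered tails handle only the
terminating word, byte by byte in endianness order.  A portable C
sketch of the predicate follows the end of the patch.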
*/ + ldw a2, (a1) + tstnbz a2 + bf 9f + stw a2, (a3) + + ldw a2, (a1, 4) + tstnbz a2 + bf 2f + stw a2, (a3, 4) + + ldw a2, (a1, 8) + tstnbz a2 + bf 3f + stw a2, (a3, 8) + + ldw a2, (a1, 12) + tstnbz a2 + bf 4f + stw a2, (a3, 12) + + ldw a2, (a1, 16) + tstnbz a2 + bf 5f + stw a2, (a3, 16) + + ldw a2, (a1, 20) + tstnbz a2 + bf 6f + stw a2, (a3, 20) + + ldw a2, (a1, 24) + tstnbz a2 + bf 7f + stw a2, (a3, 24) + + ldw a2, (a1, 28) + tstnbz a2 + bf 8f + stw a2, (a3, 28) + + addi a3, 32 + addi a1, 32 + br 1b + + +2: + addi a3, 4 + br 9f + +3: + addi a3, 8 + br 9f + +4: + addi a3, 12 + br 9f + +5: + addi a3, 16 + br 9f + +6: + addi a3, 20 + br 9f + +7: + addi a3, 24 + br 9f + +8: + addi a3, 28 +9: +# ifdef __CSKYBE__ + xtrb0 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# else + xtrb3 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# endif /* !__CSKYBE__ */ +10: + jmp lr + +11: + subi t0, 4 +12: + ld.b a2, (a1) + st.b a2, (a3) + bez a2, 10b + addi t0, 1 + addi a1, a1, 1 + addi a3, a3, 1 + bnez t0, 12b + jbr 1b +ENDPROC(strcpy) diff --git a/arch/csky/abiv2/strksyms.c b/arch/csky/abiv2/strksyms.c new file mode 100644 index 0000000000..8d1fd28c6c --- /dev/null +++ b/arch/csky/abiv2/strksyms.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memmove); +#endif +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); diff --git a/arch/csky/abiv2/strlen.S b/arch/csky/abiv2/strlen.S new file mode 100644 index 0000000000..bcdd70764d --- /dev/null +++ b/arch/csky/abiv2/strlen.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include +#include "sysdep.h" + +ENTRY(strlen) + /* Check if the start addr is aligned. */ + mov r3, r0 + andi r1, r0, 3 + movi r2, 4 + movi r0, 0 + bnez r1, .L_start_not_aligned + + LABLE_ALIGN +.L_start_addr_aligned: + /* Check if all the bytes in the word are not zero. 
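Same tstnbz pattern as strcpy above: r0 grows by 4 for each NUL-free
word, and .L_string_tail then pins down the exact NUL byte with
xtrb0-3 in endianness order.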
*/ + ldw r1, (r3) + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 4) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 8) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 12) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 16) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 20) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 24) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 28) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + addi r0, 4 + addi r3, 32 + br .L_start_addr_aligned + +.L_string_tail: +# ifdef __CSKYBE__ + xtrb0 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 +# else + xtrb3 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 +# endif /* !__CSKYBE__ */ + +.L_return: + rts + +.L_start_not_aligned: + sub r2, r2, r1 +.L_start_not_aligned_loop: + ldb r1, (r3) + PRE_BNEZAD (r2) + addi r3, 1 + bez r1, .L_return + addi r0, 1 + BNEZAD (r2, .L_start_not_aligned_loop) + br .L_start_addr_aligned ENDPROC(strlen) diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h new file mode 100644 index 0000000000..61abe9201c --- /dev/null +++ b/arch/csky/abiv2/sysdep.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __SYSDEP_H +#define __SYSDEP_H + +#ifdef __ASSEMBLER__ + +#if defined(__CK860__) +#define LABLE_ALIGN \ .balignw 16, 0x6c03 + +#define PRE_BNEZAD(R) + +#define BNEZAD(R, L) \ bnezad R, L +#else +#define LABLE_ALIGN \ .balignw 8, 0x6c03 + +#define PRE_BNEZAD(R) \ subi R, 1 + +#define BNEZAD(R, L) \ bnez R, L +#endif + +#endif + +#endif
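Two illustrative userspace sketches follow (reader-added examples, not
part of the patch; the helper names are invented for illustration).

The first mirrors the tstnbz predicate that strcpy.S and strlen.S rely
on - true only when no byte of the word is zero - using the classic
borrow-bit trick:

#include <stdint.h>
#include <stdio.h>

/* True iff no byte of w is 0x00 (portable analogue of tstnbz). */
static int word_has_no_zero_byte(uint32_t w)
{
	/* (w - 0x01010101) borrows into bit 7 of any zero byte;
	 * masking with ~w discards bytes that were >= 0x80. */
	return (((w - 0x01010101u) & ~w) & 0x80808080u) == 0;
}

int main(void)
{
	printf("%d\n", word_has_no_zero_byte(0x61626364u)); /* "abcd" -> 1 */
	printf("%d\n", word_has_no_zero_byte(0x61620064u)); /* NUL inside -> 0 */
	return 0;
}

The second round-trips the swap-PTE packing from pgtable-bits.h, as a
check of the bit layout documented in its comment:

#include <assert.h>
#include <stdint.h>

/* C mirrors of the __swp_entry/__swp_type/__swp_offset macros. */
static uint32_t swp_entry(uint32_t type, uint32_t offset)
{
	return ((type & 0x1f) << 2) | ((offset & 0x1) << 8) |
	       ((offset & 0x3ffffe) << 10);
}

static uint32_t swp_type(uint32_t val) { return (val >> 2) & 0x1f; }

static uint32_t swp_offset(uint32_t val)
{
	return ((val >> 8) & 0x1) | ((val >> 10) & 0x3ffffe);
}

int main(void)
{
	uint32_t v = swp_entry(5, 3);	/* 0x914, the worked example above */
	assert(swp_type(v) == 5);
	assert(swp_offset(v) == 3);
	return 0;
}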