author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit:    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree:      a94efe259b9009378be6d90eb30d2b019d95c194  /arch/csky
parent:    Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/csky')
160 files changed, 14816 insertions, 0 deletions
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig new file mode 100644 index 000000000..7bf0a617e --- /dev/null +++ b/arch/csky/Kconfig @@ -0,0 +1,312 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CSKY + def_bool y + select ARCH_32BIT_OFF_T + select ARCH_HAS_DMA_PREP_COHERENT + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 + select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT + select COMMON_CLK + select CLKSRC_MMIO + select CSKY_MPINTC if CPU_CK860 + select CSKY_MP_TIMER if CPU_CK860 + select CSKY_APB_INTC + select DMA_DIRECT_REMAP + select IRQ_DOMAIN + select HANDLE_DOMAIN_IRQ + select DW_APB_TIMER_OF + select GENERIC_IOREMAP + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_MULDI3 + select GENERIC_LIB_CMPDI2 + select GENERIC_LIB_UCMPDI2 + select GENERIC_ALLOCATOR + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_CPU_DEVICES + select GENERIC_IRQ_CHIP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_MULTI_HANDLER + select GENERIC_SCHED_CLOCK + select GENERIC_SMP_IDLE_THREAD + select GX6605S_TIMER if CPU_CK610 + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_MMAP_RND_BITS + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_CONTEXT_TRACKING + select HAVE_VIRT_CPU_ACCOUNTING_GEN + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_ERROR_INJECTION + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZO + select HAVE_KERNEL_LZMA + select HAVE_KPROBES if !CPU_CK610 + select HAVE_KPROBES_ON_FTRACE if !CPU_CK610 + select HAVE_KRETPROBES if !CPU_CK610 + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_DMA_CONTIGUOUS + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ + select HAVE_STACKPROTECTOR + select HAVE_SYSCALL_TRACEPOINTS + select MAY_HAVE_SPARSE_IRQ + select MODULES_USE_ELF_RELA if MODULES + select OF + select OF_EARLY_FLATTREE + select PERF_USE_VMALLOC if CPU_CK610 + select RTC_LIB + select TIMER_OF + select USB_ARCH_HAS_EHCI + select USB_ARCH_HAS_OHCI + select GENERIC_PCI_IOMAP + select HAVE_PCI + select PCI_DOMAINS_GENERIC if PCI + select PCI_SYSCALL if PCI + select PCI_MSI if PCI + select SET_FS + +config LOCKDEP_SUPPORT + def_bool y + +config ARCH_SUPPORTS_UPROBES + def_bool y if !CPU_CK610 + +config CPU_HAS_CACHEV2 + bool + +config CPU_HAS_FPUV2 + bool + +config CPU_HAS_HILO + bool + +config CPU_HAS_TLBI + bool + +config CPU_HAS_LDSTEX + bool + help + For SMP, CPU needs "ldex&stex" instructions for atomic operations. + +config CPU_NEED_TLBSYNC + bool + +config CPU_NEED_SOFTALIGN + bool + +config CPU_NO_USER_BKPT + bool + help + For abiv2 we couldn't use "trap 1" as user space bkpt in gdbserver, because + abiv2 is 16/32bit instruction set and "trap 1" is 32bit. + So we need a 16bit instruction as user space bkpt, and it will cause an illegal + instruction exception. + In kernel we parse the *regs->pc to determine whether to send SIGTRAP or not. 
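/*
 * Illustrative sketch of the dispatch the CPU_NO_USER_BKPT help text
 * above describes: on an illegal-instruction exception the kernel reads
 * the 16-bit opcode at *regs->pc and sends SIGTRAP for the user-space
 * breakpoint encoding, SIGILL otherwise. USR_BKPT_INSN is a
 * hypothetical placeholder, not a value defined in this patch.
 */
static int is_user_bkpt(struct pt_regs *regs)
{
	u16 insn;

	if (get_user(insn, (u16 __user *)instruction_pointer(regs)))
		return 0;

	return insn == USR_BKPT_INSN;	/* hypothetical 16-bit encoding */
}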
+ +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_CSUM + def_bool y + +config GENERIC_HWEIGHT + def_bool y + +config MMU + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config TIME_LOW_RES + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config CPU_TLB_SIZE + int + default "128" if (CPU_CK610 || CPU_CK807 || CPU_CK810) + default "1024" if (CPU_CK860) + +config CPU_ASID_BITS + int + default "8" if (CPU_CK610 || CPU_CK807 || CPU_CK810) + default "12" if (CPU_CK860) + +config L1_CACHE_SHIFT + int + default "4" if (CPU_CK610) + default "5" if (CPU_CK807 || CPU_CK810) + default "6" if (CPU_CK860) + +config ARCH_MMAP_RND_BITS_MIN + default 8 + +# max bits determined by the following formula: +# VA_BITS - PAGE_SHIFT - 3 +config ARCH_MMAP_RND_BITS_MAX + default 17 + +menu "Processor type and features" + +choice + prompt "CPU MODEL" + default CPU_CK807 + +config CPU_CK610 + bool "CSKY CPU ck610" + select CPU_NEED_TLBSYNC + select CPU_NEED_SOFTALIGN + select CPU_NO_USER_BKPT + +config CPU_CK810 + bool "CSKY CPU ck810" + select CPU_HAS_HILO + select CPU_NEED_TLBSYNC + +config CPU_CK807 + bool "CSKY CPU ck807" + select CPU_HAS_HILO + +config CPU_CK860 + bool "CSKY CPU ck860" + select CPU_HAS_TLBI + select CPU_HAS_CACHEV2 + select CPU_HAS_LDSTEX + select CPU_HAS_FPUV2 +endchoice + +choice + prompt "C-SKY PMU type" + depends on PERF_EVENTS + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + +config CPU_PMU_NONE + bool "None" + +config CSKY_PMU_V1 + bool "Performance Monitoring Unit Ver.1" + +endchoice + +choice + prompt "Power Manager Instruction (wait/doze/stop)" + default CPU_PM_NONE + +config CPU_PM_NONE + bool "None" + +config CPU_PM_WAIT + bool "wait" + +config CPU_PM_DOZE + bool "doze" + +config CPU_PM_STOP + bool "stop" +endchoice + +menuconfig HAVE_TCM + bool "Tightly-Coupled/Sram Memory" + select GENERIC_ALLOCATOR + help + The implementation are not only used by TCM (Tightly-Coupled Meory) + but also used by sram on SOC bus. It follow existed linux tcm + software interface, so that old tcm application codes could be + re-used directly. + +if HAVE_TCM +config ITCM_RAM_BASE + hex "ITCM ram base" + default 0xffffffff + +config ITCM_NR_PAGES + int "Page count of ITCM size: NR*4KB" + range 1 256 + default 32 + +config HAVE_DTCM + bool "DTCM Support" + +config DTCM_RAM_BASE + hex "DTCM ram base" + depends on HAVE_DTCM + default 0xffffffff + +config DTCM_NR_PAGES + int "Page count of DTCM size: NR*4KB" + depends on HAVE_DTCM + range 1 256 + default 32 +endif + +config CPU_HAS_VDSP + bool "CPU has VDSP coprocessor" + depends on CPU_HAS_FPU && CPU_HAS_FPUV2 + +config CPU_HAS_FPU + bool "CPU has FPU coprocessor" + depends on CPU_CK807 || CPU_CK810 || CPU_CK860 + +config CPU_HAS_ICACHE_INS + bool "CPU has Icache invalidate instructions" + depends on CPU_HAS_CACHEV2 + +config CPU_HAS_TEE + bool "CPU has Trusted Execution Environment" + depends on CPU_CK810 + +config SMP + bool "Symmetric Multi-Processing (SMP) support for C-SKY" + depends on CPU_CK860 + default n + +config NR_CPUS + int "Maximum number of CPUs (2-32)" + range 2 32 + depends on SMP + default "4" + +config HIGHMEM + bool "High Memory Support" + depends on !CPU_CK610 + default y + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "11" + +config DRAM_BASE + hex "DRAM start addr (the same with memory-section in dts)" + default 0x0 + +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + select GENERIC_IRQ_MIGRATION + depends on SMP + help + Say Y here to allow turning CPUs off and on. 
CPUs can be + controlled through /sys/devices/system/cpu/cpu1/hotplug/target. + + Say N if you want to disable CPU hotplug. +endmenu + +source "arch/csky/Kconfig.platforms" + +source "kernel/Kconfig.hz" diff --git a/arch/csky/Kconfig.debug b/arch/csky/Kconfig.debug new file mode 100644 index 000000000..295942fe3 --- /dev/null +++ b/arch/csky/Kconfig.debug @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +# dummy file, do not delete diff --git a/arch/csky/Kconfig.platforms b/arch/csky/Kconfig.platforms new file mode 100644 index 000000000..639e17f4e --- /dev/null +++ b/arch/csky/Kconfig.platforms @@ -0,0 +1,9 @@ +menu "Platform drivers selection" + +config ARCH_CSKY_DW_APB_ICTL + bool "Select dw-apb interrupt controller" + select DW_APB_ICTL + default y + help + This enables support for snps dw-apb-ictl +endmenu diff --git a/arch/csky/Makefile b/arch/csky/Makefile new file mode 100644 index 000000000..37f593a4b --- /dev/null +++ b/arch/csky/Makefile @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: GPL-2.0-only +OBJCOPYFLAGS :=-O binary +GZFLAGS :=-9 + +ifdef CONFIG_CPU_HAS_FPU +FPUEXT = f +endif + +ifdef CONFIG_CPU_HAS_VDSP +VDSPEXT = v +endif + +ifdef CONFIG_CPU_HAS_TEE +TEEEXT = t +endif + +ifdef CONFIG_CPU_CK610 +CPUTYPE = ck610 +CSKYABI = abiv1 +endif + +ifdef CONFIG_CPU_CK810 +CPUTYPE = ck810 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK807 +CPUTYPE = ck807 +CSKYABI = abiv2 +endif + +ifdef CONFIG_CPU_CK860 +CPUTYPE = ck860 +CSKYABI = abiv2 +endif + +ifneq ($(CSKYABI),) +MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT) +KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR) +KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\" +KBUILD_CFLAGS += -msoft-float -mdiv +KBUILD_CFLAGS += -fno-tree-vectorize +endif + +KBUILD_CFLAGS += -pipe +ifeq ($(CSKYABI),abiv2) +KBUILD_CFLAGS += -mno-stack-size +endif + +ifdef CONFIG_FRAME_POINTER +KBUILD_CFLAGS += -mbacktrace +endif + +abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI)) +KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs)) + +KBUILD_CPPFLAGS += -mlittle-endian +LDFLAGS += -EL + +KBUILD_AFLAGS += $(KBUILD_CFLAGS) + +head-y := arch/csky/kernel/head.o + +core-y += arch/csky/kernel/ +core-y += arch/csky/mm/ +core-y += arch/csky/$(CSKYABI)/ + +libs-y += arch/csky/lib/ \ + $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) + +boot := arch/csky/boot +core-y += $(boot)/dts/ + +all: zImage + +zImage Image uImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + +define archhelp + echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' + echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo ' uImage - U-Boot wrapped zImage' +endef diff --git a/arch/csky/abiv1/Makefile b/arch/csky/abiv1/Makefile new file mode 100644 index 000000000..601ce3b2f --- /dev/null +++ b/arch/csky/abiv1/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CPU_NEED_SOFTALIGN) += alignment.o +obj-y += bswapdi.o +obj-y += bswapsi.o +obj-y += cacheflush.o +obj-y += mmap.o +obj-y += memcpy.o +obj-y += strksyms.o diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c new file mode 100644 index 000000000..2df115d0e --- /dev/null +++ b/arch/csky/abiv1/alignment.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
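/*
 * Worked decode for the csky_alignment() handler below. abiv1 ld/st
 * instructions are picked apart as rx = opcode & 0xf, imm =
 * (opcode >> 4) & 0xf, rz = (opcode >> 8) & 0xf, class = opcode & 0xf000.
 * The opcode value here is hypothetical:
 *
 *   opcode 0xc532: class 0xc000 (OP_LDH), rz = 5, imm = 3, rx = 2
 *   => the fixup loads a halfword from r2 + (3 << 1) = r2 + 6 into r5,
 *      byte by byte via ldb_asm(), then advances regs->pc by 2.
 */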
+ +#include <linux/kernel.h> +#include <linux/uaccess.h> +#include <linux/ptrace.h> + +static int align_kern_enable = 1; +static int align_usr_enable = 1; +static int align_kern_count = 0; +static int align_usr_count = 0; + +static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx) +{ + return rx == 15 ? regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx); +} + +static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val) +{ + if (rx == 15) + regs->lr = val; + else + *((uint32_t *)&(regs->a0) - 2 + rx) = val; +} + +/* + * Get byte-value from addr and set it to *valp. + * + * Success: return 0 + * Failure: return 1 + */ +static int ldb_asm(uint32_t addr, uint32_t *valp) +{ + uint32_t val; + int err; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "ldb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err), "=r"(val) + : "r" (addr) + ); + + *valp = val; + + return err; +} + +/* + * Put byte-value to addr. + * + * Success: return 0 + * Failure: return 1 + */ +static int stb_asm(uint32_t addr, uint32_t val) +{ + int err; + + asm volatile ( + "movi %0, 0\n" + "1:\n" + "stb %1, (%2)\n" + "br 3f\n" + "2:\n" + "movi %0, 1\n" + "br 3f\n" + ".section __ex_table,\"a\"\n" + ".align 2\n" + ".long 1b, 2b\n" + ".previous\n" + "3:\n" + : "=&r"(err) + : "r"(val), "r" (addr) + ); + + return err; +} + +/* + * Get half-word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + if (ldb_asm(addr, &byte0)) + return 1; + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + byte0 |= byte1 << 8; + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store half-word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1; + + byte0 = byte1 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + return 0; +} + +/* + * Get word from [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + if (ldb_asm(addr, &byte0)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte1)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte2)) + return 1; + + addr += 1; + if (ldb_asm(addr, &byte3)) + return 1; + + byte0 |= byte1 << 8; + byte0 |= byte2 << 16; + byte0 |= byte3 << 24; + + put_ptreg(regs, rz, byte0); + + return 0; +} + +/* + * Store word to [rx + imm] + * + * Success: return 0 + * Failure: return 1 + */ +static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr) +{ + uint32_t byte0, byte1, byte2, byte3; + + byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz); + + byte0 &= 0xff; + + if (stb_asm(addr, byte0)) + return 1; + + addr += 1; + byte1 = (byte1 >> 8) & 0xff; + if (stb_asm(addr, byte1)) + return 1; + + addr += 1; + byte2 = (byte2 >> 16) & 0xff; + if (stb_asm(addr, byte2)) + return 1; + + addr += 1; + byte3 = (byte3 >> 24) & 0xff; + if (stb_asm(addr, byte3)) + return 1; + + return 0; +} + +extern int fixup_exception(struct pt_regs *regs); + +#define OP_LDH 0xc000 +#define OP_STH 0xd000 +#define OP_LDW 0x8000 +#define OP_STW 0x9000 + +void csky_alignment(struct pt_regs *regs) +{ + int ret; + uint16_t tmp; + uint32_t opcode = 
0; + uint32_t rx = 0; + uint32_t rz = 0; + uint32_t imm = 0; + uint32_t addr = 0; + + if (!user_mode(regs)) + goto kernel_area; + + if (!align_usr_enable) { + pr_err("%s user disabled.\n", __func__); + goto bad_area; + } + + align_usr_count++; + + ret = get_user(tmp, (uint16_t *)instruction_pointer(regs)); + if (ret) { + pr_err("%s get_user failed.\n", __func__); + goto bad_area; + } + + goto good_area; + +kernel_area: + if (!align_kern_enable) { + pr_err("%s kernel disabled.\n", __func__); + goto bad_area; + } + + align_kern_count++; + + tmp = *(uint16_t *)instruction_pointer(regs); + +good_area: + opcode = (uint32_t)tmp; + + rx = opcode & 0xf; + imm = (opcode >> 4) & 0xf; + rz = (opcode >> 8) & 0xf; + opcode &= 0xf000; + + if (rx == 0 || rx == 1 || rz == 0 || rz == 1) + goto bad_area; + + switch (opcode) { + case OP_LDH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = ldh_c(regs, rz, addr); + break; + case OP_LDW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = ldw_c(regs, rz, addr); + break; + case OP_STH: + addr = get_ptreg(regs, rx) + (imm << 1); + ret = sth_c(regs, rz, addr); + break; + case OP_STW: + addr = get_ptreg(regs, rx) + (imm << 2); + ret = stw_c(regs, rz, addr); + break; + } + + if (ret) + goto bad_area; + + regs->pc += 2; + + return; + +bad_area: + if (!user_mode(regs)) { + if (fixup_exception(regs)) + return; + + bust_spinlocks(1); + pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n", + __func__, opcode, rz, rx, imm, addr); + show_regs(regs); + bust_spinlocks(0); + make_task_dead(SIGKILL); + } + + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); +} + +static struct ctl_table alignment_tbl[5] = { + { + .procname = "kernel_enable", + .data = &align_kern_enable, + .maxlen = sizeof(align_kern_enable), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "user_enable", + .data = &align_usr_enable, + .maxlen = sizeof(align_usr_enable), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "kernel_count", + .data = &align_kern_count, + .maxlen = sizeof(align_kern_count), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + { + .procname = "user_count", + .data = &align_usr_count, + .maxlen = sizeof(align_usr_count), + .mode = 0666, + .proc_handler = &proc_dointvec + }, + {} +}; + +static struct ctl_table sysctl_table[2] = { + { + .procname = "csky_alignment", + .mode = 0555, + .child = alignment_tbl}, + {} +}; + +static struct ctl_path sysctl_path[2] = { + {.procname = "csky"}, + {} +}; + +static int __init csky_alignment_init(void) +{ + register_sysctl_paths(sysctl_path, sysctl_table); + return 0; +} + +arch_initcall(csky_alignment_init); diff --git a/arch/csky/abiv1/bswapdi.c b/arch/csky/abiv1/bswapdi.c new file mode 100644 index 000000000..f50a1d6e3 --- /dev/null +++ b/arch/csky/abiv1/bswapdi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +unsigned long long notrace __bswapdi2(unsigned long long u) +{ + return ___constant_swab64(u); +} +EXPORT_SYMBOL(__bswapdi2); diff --git a/arch/csky/abiv1/bswapsi.c b/arch/csky/abiv1/bswapsi.c new file mode 100644 index 000000000..0f79182e8 --- /dev/null +++ b/arch/csky/abiv1/bswapsi.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
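/*
 * Why __bswapdi2 (above) and __bswapsi2 (below) exist: the compiler may
 * emit calls to these libgcc-style helpers for __builtin_bswap64/32
 * when no byte-swap instruction is available, so the kernel defines and
 * exports them for modules. Expected results (illustrative values):
 *
 *   __bswapsi2(0x11223344U)           == 0x44332211U
 *   __bswapdi2(0x1122334455667788ULL) == 0x8877665544332211ULL
 */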
+ +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +unsigned int notrace __bswapsi2(unsigned int u) +{ + return ___constant_swab32(u); +} +EXPORT_SYMBOL(__bswapsi2); diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c new file mode 100644 index 000000000..9f1fe80cc --- /dev/null +++ b/arch/csky/abiv1/cacheflush.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/syscalls.h> +#include <linux/spinlock.h> +#include <asm/page.h> +#include <asm/cache.h> +#include <asm/cacheflush.h> +#include <asm/cachectl.h> + +#define PG_dcache_clean PG_arch_1 + +void flush_dcache_page(struct page *page) +{ + struct address_space *mapping; + + if (page == ZERO_PAGE(0)) + return; + + mapping = page_mapping_file(page); + + if (mapping && !page_mapcount(page)) + clear_bit(PG_dcache_clean, &page->flags); + else { + dcache_wbinv_all(); + if (mapping) + icache_inv_all(); + set_bit(PG_dcache_clean, &page->flags); + } +} +EXPORT_SYMBOL(flush_dcache_page); + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + unsigned long pfn = pte_pfn(*ptep); + struct page *page; + + if (!pfn_valid(pfn)) + return; + + page = pfn_to_page(pfn); + if (page == ZERO_PAGE(0)) + return; + + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) + dcache_wbinv_all(); + + if (page_mapping_file(page)) { + if (vma->vm_flags & VM_EXEC) + icache_inv_all(); + } +} + +void flush_kernel_dcache_page(struct page *page) +{ + struct address_space *mapping; + + mapping = page_mapping_file(page); + + if (!mapping || mapping_mapped(mapping)) + dcache_wbinv_all(); +} +EXPORT_SYMBOL(flush_kernel_dcache_page); + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + dcache_wbinv_all(); + + if (vma->vm_flags & VM_EXEC) + icache_inv_all(); +} diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h new file mode 100644 index 000000000..d3e04208d --- /dev/null +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +#include <linux/mm.h> +#include <asm/string.h> +#include <asm/cache.h> + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +extern void flush_dcache_page(struct page *); + +#define flush_cache_mm(mm) dcache_wbinv_all() +#define flush_cache_page(vma, page, pfn) cache_wbinv_all() +#define flush_cache_dup_mm(mm) cache_wbinv_all() + +#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +extern void flush_kernel_dcache_page(struct page *); + +#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) +#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) + +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + dcache_wbinv_all(); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + dcache_wbinv_all(); +} + +#define ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + if (PageAnon(page)) + cache_wbinv_all(); +} + +/* + * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken. + * Use cache_wbinv_all() here and need to be improved in future. 
+ */ +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +#define flush_cache_vmap(start, end) cache_wbinv_all() +#define flush_cache_vunmap(start, end) cache_wbinv_all() + +#define flush_icache_page(vma, page) do {} while (0); +#define flush_icache_range(start, end) cache_wbinv_range(start, end) +#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) +#define flush_icache_deferred(mm) do {} while (0); + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ +} while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ + cache_wbinv_all(); \ +} while (0) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h new file mode 100644 index 000000000..ba8eb5870 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/ckmmu.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CKMMUV1_H +#define __ASM_CSKY_CKMMUV1_H +#include <abi/reg_ops.h> + +static inline int read_mmu_index(void) +{ + return cprcr("cpcr0"); +} + +static inline void write_mmu_index(int value) +{ + cpwcr("cpcr0", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return cprcr("cpcr2") << 6; +} + +static inline int read_mmu_entrylo1(void) +{ + return cprcr("cpcr3") << 6; +} + +static inline void write_mmu_pagemask(int value) +{ + cpwcr("cpcr6", value); +} + +static inline int read_mmu_entryhi(void) +{ + return cprcr("cpcr4"); +} + +static inline void write_mmu_entryhi(int value) +{ + cpwcr("cpcr4", value); +} + +static inline unsigned long read_mmu_msa0(void) +{ + return cprcr("cpcr30"); +} + +static inline void write_mmu_msa0(unsigned long value) +{ + cpwcr("cpcr30", value); +} + +static inline unsigned long read_mmu_msa1(void) +{ + return cprcr("cpcr31"); +} + +static inline void write_mmu_msa1(unsigned long value) +{ + cpwcr("cpcr31", value); +} + +/* + * TLB operations. 
+ */ +static inline void tlb_probe(void) +{ + cpwcr("cpcr8", 0x80000000); +} + +static inline void tlb_read(void) +{ + cpwcr("cpcr8", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ + cpwcr("cpcr8", 0x04000000); +} + + +static inline void local_tlb_invalid_all(void) +{ + tlb_invalid_all(); +} + +static inline void tlb_invalid_indexed(void) +{ + cpwcr("cpcr8", 0x02000000); +} + +static inline void setup_pgd(unsigned long pgd, bool kernel) +{ + cpwcr("cpcr29", pgd | BIT(0)); +} + +static inline unsigned long get_pgd(void) +{ + return cprcr("cpcr29") & ~BIT(0); +} +#endif /* __ASM_CSKY_CKMMUV1_H */ diff --git a/arch/csky/abiv1/inc/abi/elf.h b/arch/csky/abiv1/inc/abi/elf.h new file mode 100644 index 000000000..3058cc06b --- /dev/null +++ b/arch/csky/abiv1/inc/abi/elf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->regs[9]; \ + pr_reg[2] = regs->usp; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a0; \ + pr_reg[5] = regs->a1; \ + pr_reg[6] = regs->a2; \ + pr_reg[7] = regs->a3; \ + pr_reg[8] = regs->regs[0]; \ + pr_reg[9] = regs->regs[1]; \ + pr_reg[10] = regs->regs[2]; \ + pr_reg[11] = regs->regs[3]; \ + pr_reg[12] = regs->regs[4]; \ + pr_reg[13] = regs->regs[5]; \ + pr_reg[14] = regs->regs[6]; \ + pr_reg[15] = regs->regs[7]; \ + pr_reg[16] = regs->regs[8]; \ + pr_reg[17] = regs->lr; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h new file mode 100644 index 000000000..13c23e2c7 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
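/*
 * Note on the abiv1 setup_pgd()/get_pgd() pair above: the BIT(0) or-ed
 * into cpcr29 appears to act as a valid/enable flag, which is why
 * get_pgd() masks it back off. Round-trip sketch (page-aligned pgd
 * assumed, so bit 0 is free):
 */
static inline void pgd_roundtrip_check(unsigned long pgd)
{
	setup_pgd(pgd, false);		/* stores pgd | BIT(0) in cpcr29 */
	BUG_ON(get_pgd() != pgd);	/* the mask restores the original */
}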
+ +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include <asm/setup.h> +#include <abi/regdef.h> + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define usp ss1 + +.macro USPTOKSP + mtcr sp, usp + mfcr sp, ss0 +.endm + +.macro KSPTOUSP + mtcr sp, ss0 + mfcr sp, usp +.endm + +.macro SAVE_ALL epc_inc + mtcr r13, ss2 + mfcr r13, epsr + btsti r13, 31 + bt 1f + USPTOKSP +1: + subi sp, 32 + subi sp, 32 + subi sp, 16 + stw r13, (sp, 12) + + stw lr, (sp, 4) + + mfcr lr, epc + movi r13, \epc_inc + add lr, r13 + stw lr, (sp, 8) + + mov lr, sp + addi lr, 32 + addi lr, 32 + addi lr, 16 + bt 2f + mfcr lr, ss1 +2: + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + mfcr r13, ss2 + stw r6, (sp) + stw r7, (sp, 4) + stw r8, (sp, 8) + stw r9, (sp, 12) + stw r10, (sp, 16) + stw r11, (sp, 20) + stw r12, (sp, 24) + stw r13, (sp, 28) + stw r14, (sp, 32) + stw r1, (sp, 36) + subi sp, 32 + subi sp, 8 +.endm + +.macro RESTORE_ALL + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + bt 1f + ldw a0, (sp, 16) + mtcr a0, ss1 +1: + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 32 + addi sp, 8 + ldw r6, (sp) + ldw r7, (sp, 4) + ldw r8, (sp, 8) + ldw r9, (sp, 12) + ldw r10, (sp, 16) + ldw r11, (sp, 20) + ldw r12, (sp, 24) + ldw r13, (sp, 28) + ldw r14, (sp, 32) + ldw r1, (sp, 36) + addi sp, 32 + addi sp, 8 + + bt 2f + KSPTOUSP +2: + rte +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 32 + stm r8-r15, (sp) +.endm + +.macro RESTORE_SWITCH_STACK + ldm r8-r15, (sp) + addi sp, 32 +.endm + +/* MMU registers operators. */ +.macro RD_MIR rx + cprcr \rx, cpcr0 +.endm + +.macro RD_MEH rx + cprcr \rx, cpcr4 +.endm + +.macro RD_MCIR rx + cprcr \rx, cpcr8 +.endm + +.macro RD_PGDR rx + cprcr \rx, cpcr29 +.endm + +.macro WR_MEH rx + cpwcr \rx, cpcr4 +.endm + +.macro WR_MCIR rx + cpwcr \rx, cpcr8 +.endm + +.macro SETUP_MMU + /* Init psr and enable ee */ + lrw r6, DEFAULT_PSR_VALUE + mtcr r6, psr + psrset ee + + /* Select MMU as co-processor */ + cpseti cp15 + + /* + * cpcr30 format: + * 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0 + * BA Reserved C D V + */ + cprcr r6, cpcr30 + lsri r6, 29 + lsli r6, 29 + addi r6, 0xe + cpwcr r6, cpcr30 + + movi r6, 0 + cpwcr r6, cpcr31 +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h new file mode 100644 index 000000000..c86451911 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/page.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
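/*
 * The abiv1 page helpers below use pages_do_alias() to detect VIPT
 * cache-colour aliasing between two virtual addresses. Worked example
 * with a hypothetical SHMLBA of 0x4000 (four 4 KiB colours):
 *
 *   pages_do_alias(0x1000, 0x5000) == (0x1000 ^ 0x5000) & 0x3fff == 0
 *     -> same colour, no extra flush;
 *   pages_do_alias(0x1000, 0x3000) == 0x2000
 *     -> different colours, flush_dcache_page() is needed.
 */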
+ +#include <asm/shmparam.h> + +extern void flush_dcache_page(struct page *page); + +static inline unsigned long pages_do_alias(unsigned long addr1, + unsigned long addr2) +{ + return (addr1 ^ addr2) & (SHMLBA-1); +} + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); + if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); + if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK)) + flush_dcache_page(page); +} diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h new file mode 100644 index 000000000..d605445aa --- /dev/null +++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<3) +#define PAGE_ACCESSED_BIT (3) + +#define _PAGE_READ (1<<1) +#define _PAGE_WRITE (1<<2) +#define _PAGE_PRESENT (1<<0) + +#define _PAGE_MODIFIED (1<<4) +#define PAGE_MODIFIED_BIT (4) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<6) + +#define _PAGE_VALID (1<<7) +#define PAGE_VALID_BIT (7) + +#define _PAGE_DIRTY (1<<8) +#define PAGE_DIRTY_BIT (8) + +#define _PAGE_CACHE (3<<9) +#define _PAGE_UNCACHE (2<<9) +#define _PAGE_SO _PAGE_UNCACHE + +#define _CACHE_MASK (7<<9) + +#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE) +#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_UNCACHE) + +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h new file mode 100644 index 000000000..a153bd391 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/reg_ops.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include <asm/reg_ops.h> + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n":"=b"(tmp)); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile("cpwcr %0, "reg"\n"::"b"(val)); \ +}) + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr30"); +} + +static inline unsigned int mfcr_ccr2(void) { return 0; } + +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h new file mode 100644 index 000000000..104707fbd --- /dev/null +++ b/arch/csky/abiv1/inc/abi/regdef.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
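/*
 * Combining the abiv1 PTE bits above -- an illustrative composition,
 * not a definition from this patch: a present, readable, writable,
 * cached entry would carry
 *
 *   _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _CACHE_CACHED
 *   = (1<<0) | (1<<1) | (1<<2) | ((1<<7) | (3<<9))
 *   = 0x687
 */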
+ +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#define syscallid r1 +#define regs_syscallid(regs) regs->regs[9] +#define regs_fp(regs) regs->regs[2] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-0 | + * S CPID VEC TM + * + * S: Super Mode + * CPID: Coprocessor id, only 15 for MMU + * VEC: Exception Number + * TM: Trace Mode + */ +#define DEFAULT_PSR_VALUE 0x8f000000 + +#define SYSTRACE_SAVENUM 2 + +#define TRAP0_SIZE 2 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h new file mode 100644 index 000000000..0cd43384f --- /dev/null +++ b/arch/csky/abiv1/inc/abi/string.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h new file mode 100644 index 000000000..17c826864 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/switch_context.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h new file mode 100644 index 000000000..14352f524 --- /dev/null +++ b/arch/csky/abiv1/inc/abi/vdso.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/uaccess.h> + +static inline int setup_vdso_page(unsigned short *ptr) +{ + int err = 0; + + /* movi r1, 127 */ + err |= __put_user(0x67f1, ptr + 0); + /* addi r1, (139 - 127) */ + err |= __put_user(0x20b1, ptr + 1); + /* trap 0 */ + err |= __put_user(0x0008, ptr + 2); + + return err; +} diff --git a/arch/csky/abiv1/memcpy.S b/arch/csky/abiv1/memcpy.S new file mode 100644 index 000000000..5078eb516 --- /dev/null +++ b/arch/csky/abiv1/memcpy.S @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
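/*
 * Two small decodes of the abiv1 headers above:
 *
 * DEFAULT_PSR_VALUE 0x8f000000 against the documented PSR layout:
 *   bit 31 (S) = 1 -> supervisor mode; bits 30-24 (CPID) = 0xf ->
 *   coprocessor 15, the MMU; VEC = 0; TM = 0.
 *
 * setup_vdso_page(): "movi r1, 127; addi r1, 12; trap 0" leaves
 * 127 + 12 = 139 in r1 -- the syscallid register per regdef.h --
 * before trapping; 139 is __NR_rt_sigreturn in the asm-generic
 * syscall table, so this looks like the signal-return trampoline.
 */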
+ +#include <linux/linkage.h> + +.macro GET_FRONT_BITS rx y +#ifdef __cskyLE__ + lsri \rx, \y +#else + lsli \rx, \y +#endif +.endm + +.macro GET_AFTER_BITS rx y +#ifdef __cskyLE__ + lsli \rx, \y +#else + lsri \rx, \y +#endif +.endm + +/* void *memcpy(void *dest, const void *src, size_t n); */ +ENTRY(memcpy) + mov r7, r2 + cmplti r4, 4 + bt .L_copy_by_byte + mov r6, r2 + andi r6, 3 + cmpnei r6, 0 + jbt .L_dest_not_aligned + mov r6, r3 + andi r6, 3 + cmpnei r6, 0 + jbt .L_dest_aligned_but_src_not_aligned +.L0: + cmplti r4, 16 + jbt .L_aligned_and_len_less_16bytes + subi sp, 8 + stw r8, (sp, 0) +.L_aligned_and_len_larger_16bytes: + ldw r1, (r3, 0) + ldw r5, (r3, 4) + ldw r8, (r3, 8) + stw r1, (r7, 0) + ldw r1, (r3, 12) + stw r5, (r7, 4) + stw r8, (r7, 8) + stw r1, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L_aligned_and_len_larger_16bytes + ldw r8, (sp, 0) + addi sp, 8 + cmpnei r4, 0 + jbf .L_return + +.L_aligned_and_len_less_16bytes: + cmplti r4, 4 + bt .L_copy_by_byte +.L1: + ldw r1, (r3, 0) + stw r1, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + jbf .L1 + br .L_copy_by_byte + +.L_return: + rts + +.L_copy_by_byte: /* len less than 4 bytes */ + cmpnei r4, 0 + jbf .L_return +.L4: + ldb r1, (r3, 0) + stb r1, (r7, 0) + addi r3, 1 + addi r7, 1 + decne r4 + jbt .L4 + rts + +/* + * If dest is not aligned, just copying some bytes makes the dest align. + * Afther that, we judge whether the src is aligned. + */ +.L_dest_not_aligned: + mov r5, r3 + rsub r5, r5, r7 + abs r5, r5 + cmplt r5, r4 + bt .L_copy_by_byte + mov r5, r7 + sub r5, r3 + cmphs r5, r4 + bf .L_copy_by_byte + mov r5, r6 +.L5: + ldb r1, (r3, 0) /* makes the dest align. */ + stb r1, (r7, 0) + addi r5, 1 + subi r4, 1 + addi r3, 1 + addi r7, 1 + cmpnei r5, 4 + jbt .L5 + cmplti r4, 4 + jbt .L_copy_by_byte + mov r6, r3 /* judge whether the src is aligned. */ + andi r6, 3 + cmpnei r6, 0 + jbf .L0 + +/* Judge the number of misaligned, 1, 2, 3? */ +.L_dest_aligned_but_src_not_aligned: + mov r5, r3 + rsub r5, r5, r7 + abs r5, r5 + cmplt r5, r4 + bt .L_copy_by_byte + bclri r3, 0 + bclri r3, 1 + ldw r1, (r3, 0) + addi r3, 4 + cmpnei r6, 2 + bf .L_dest_aligned_but_src_not_aligned_2bytes + cmpnei r6, 3 + bf .L_dest_aligned_but_src_not_aligned_3bytes + +.L_dest_aligned_but_src_not_aligned_1byte: + mov r5, r7 + sub r5, r3 + cmphs r5, r4 + bf .L_copy_by_byte + cmplti r4, 16 + bf .L11 +.L10: /* If the len is less than 16 bytes */ + GET_FRONT_BITS r1 8 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 24 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L10 + subi r3, 3 + br .L_copy_by_byte +.L11: + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) +.L12: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 8 /* little or big endian? 
*/ + mov r10, r5 + GET_AFTER_BITS r5 24 + or r5, r1 + + GET_FRONT_BITS r10 8 + mov r1, r11 + GET_AFTER_BITS r11 24 + or r11, r10 + + GET_FRONT_BITS r1 8 + mov r10, r8 + GET_AFTER_BITS r8 24 + or r8, r1 + + GET_FRONT_BITS r10 8 + mov r1, r9 + GET_AFTER_BITS r9 24 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L12 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp , 16 + cmplti r4, 4 + bf .L10 + subi r3, 3 + br .L_copy_by_byte + +.L_dest_aligned_but_src_not_aligned_2bytes: + cmplti r4, 16 + bf .L21 +.L20: + GET_FRONT_BITS r1 16 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 16 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L20 + subi r3, 2 + br .L_copy_by_byte + rts + +.L21: /* n > 16 */ + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) + +.L22: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 16 + mov r10, r5 + GET_AFTER_BITS r5 16 + or r5, r1 + + GET_FRONT_BITS r10 16 + mov r1, r11 + GET_AFTER_BITS r11 16 + or r11, r10 + + GET_FRONT_BITS r1 16 + mov r10, r8 + GET_AFTER_BITS r8 16 + or r8, r1 + + GET_FRONT_BITS r10 16 + mov r1, r9 + GET_AFTER_BITS r9 16 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L22 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp, 16 + cmplti r4, 4 + bf .L20 + subi r3, 2 + br .L_copy_by_byte + + +.L_dest_aligned_but_src_not_aligned_3bytes: + cmplti r4, 16 + bf .L31 +.L30: + GET_FRONT_BITS r1 24 + mov r5, r1 + ldw r6, (r3, 0) + mov r1, r6 + GET_AFTER_BITS r6 8 + or r5, r6 + stw r5, (r7, 0) + subi r4, 4 + addi r3, 4 + addi r7, 4 + cmplti r4, 4 + bf .L30 + subi r3, 1 + br .L_copy_by_byte +.L31: + subi sp, 16 + stw r8, (sp, 0) + stw r9, (sp, 4) + stw r10, (sp, 8) + stw r11, (sp, 12) +.L32: + ldw r5, (r3, 0) + ldw r11, (r3, 4) + ldw r8, (r3, 8) + ldw r9, (r3, 12) + + GET_FRONT_BITS r1 24 + mov r10, r5 + GET_AFTER_BITS r5 8 + or r5, r1 + + GET_FRONT_BITS r10 24 + mov r1, r11 + GET_AFTER_BITS r11 8 + or r11, r10 + + GET_FRONT_BITS r1 24 + mov r10, r8 + GET_AFTER_BITS r8 8 + or r8, r1 + + GET_FRONT_BITS r10 24 + mov r1, r9 + GET_AFTER_BITS r9 8 + or r9, r10 + + stw r5, (r7, 0) + stw r11, (r7, 4) + stw r8, (r7, 8) + stw r9, (r7, 12) + subi r4, 16 + addi r3, 16 + addi r7, 16 + cmplti r4, 16 + jbf .L32 + ldw r8, (sp, 0) + ldw r9, (sp, 4) + ldw r10, (sp, 8) + ldw r11, (sp, 12) + addi sp, 16 + cmplti r4, 4 + bf .L30 + subi r3, 1 + br .L_copy_by_byte diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c new file mode 100644 index 000000000..6792aca49 --- /dev/null +++ b/arch/csky/abiv1/mmap.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/shm.h> +#include <linux/sched.h> +#include <linux/random.h> +#include <linux/io.h> + +#define COLOUR_ALIGN(addr,pgoff) \ + ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ + (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) + +/* + * We need to ensure that shared mappings are correctly aligned to + * avoid aliasing issues with VIPT caches. We need to ensure that + * a specific page of an object is always mapped at a multiple of + * SHMLBA bytes. 
+ * + * We unconditionally provide this function for all cases. + */ +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int do_align = 0; + struct vm_unmapped_area_info info; + + /* + * We only need to do colour alignment if either the I or D + * caches alias. + */ + do_align = filp || (flags & MAP_SHARED); + + /* + * We enforce the MAP_FIXED case. + */ + if (flags & MAP_FIXED) { + if (flags & MAP_SHARED && + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); +} diff --git a/arch/csky/abiv1/strksyms.c b/arch/csky/abiv1/strksyms.c new file mode 100644 index 000000000..c7ccbb27e --- /dev/null +++ b/arch/csky/abiv1/strksyms.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> + +EXPORT_SYMBOL(memcpy); diff --git a/arch/csky/abiv2/Makefile b/arch/csky/abiv2/Makefile new file mode 100644 index 000000000..c561efa55 --- /dev/null +++ b/arch/csky/abiv2/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += cacheflush.o +obj-$(CONFIG_CPU_HAS_FPU) += fpu.o +obj-y += memcmp.o +obj-y += memcpy.o +obj-y += memmove.o +obj-y += memset.o +obj-y += strcmp.o +obj-y += strcpy.o +obj-y += strlen.o +obj-y += strksyms.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c new file mode 100644 index 000000000..790f1ebfb --- /dev/null +++ b/arch/csky/abiv2/cacheflush.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/cache.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <asm/cache.h> + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) +{ + unsigned long addr; + struct page *page; + + page = pfn_to_page(pte_pfn(*pte)); + if (page == ZERO_PAGE(0)) + return; + + if (test_and_set_bit(PG_dcache_clean, &page->flags)) + return; + + addr = (unsigned long) kmap_atomic(page); + + dcache_wb_range(addr, addr + PAGE_SIZE); + + if (vma->vm_flags & VM_EXEC) + icache_inv_range(addr, addr + PAGE_SIZE); + + kunmap_atomic((void *) addr); +} + +void flush_icache_deferred(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + cpumask_t *mask = &mm->context.icache_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + /* + * Ensure the remote hart's writes are visible to this hart. + * This pairs with a barrier in flush_icache_mm. 
+ */ + smp_mb(); + local_icache_inv_all(NULL); + } +} + +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + unsigned int cpu; + cpumask_t others, *mask; + + preempt_disable(); + +#ifdef CONFIG_CPU_HAS_ICACHE_INS + if (mm == current->mm) { + icache_inv_range(start, end); + preempt_enable(); + return; + } +#endif + + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); + + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_icache_inv_all(NULL); + + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. + */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + + if (mm != current->active_mm || !cpumask_empty(&others)) { + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); + cpumask_clear(mask); + } + + preempt_enable(); +} diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c new file mode 100644 index 000000000..5acc5c2e5 --- /dev/null +++ b/arch/csky/abiv2/fpu.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/ptrace.h> +#include <linux/uaccess.h> +#include <abi/reg_ops.h> + +#define MTCR_MASK 0xFC00FFE0 +#define MFCR_MASK 0xFC00FFE0 +#define MTCR_DIST 0xC0006420 +#define MFCR_DIST 0xC0006020 + +/* + * fpu_libc_helper() is to help libc to excute: + * - mfcr %a, cr<1, 2> + * - mfcr %a, cr<2, 2> + * - mtcr %a, cr<1, 2> + * - mtcr %a, cr<2, 2> + */ +int fpu_libc_helper(struct pt_regs *regs) +{ + int fault; + unsigned long instrptr, regx = 0; + unsigned long index = 0, tmp = 0; + unsigned long tinstr = 0; + u16 instr_hi, instr_low; + + instrptr = instruction_pointer(regs); + if (instrptr & 1) + return 0; + + fault = __get_user(instr_low, (u16 *)instrptr); + if (fault) + return 0; + + fault = __get_user(instr_hi, (u16 *)(instrptr + 2)); + if (fault) + return 0; + + tinstr = instr_hi | ((unsigned long)instr_low << 16); + + if (((tinstr >> 21) & 0x1F) != 2) + return 0; + + if ((tinstr & MTCR_MASK) == MTCR_DIST) { + index = (tinstr >> 16) & 0x1F; + if (index > 13) + return 0; + + tmp = tinstr & 0x1F; + if (tmp > 2) + return 0; + + regx = *(®s->a0 + index); + + if (tmp == 1) + mtcr("cr<1, 2>", regx); + else if (tmp == 2) + mtcr("cr<2, 2>", regx); + else + return 0; + + regs->pc += 4; + return 1; + } + + if ((tinstr & MFCR_MASK) == MFCR_DIST) { + index = tinstr & 0x1F; + if (index > 13) + return 0; + + tmp = ((tinstr >> 16) & 0x1F); + if (tmp > 2) + return 0; + + if (tmp == 1) + regx = mfcr("cr<1, 2>"); + else if (tmp == 2) + regx = mfcr("cr<2, 2>"); + else + return 0; + + *(®s->a0 + index) = regx; + + regs->pc += 4; + return 1; + } + + return 0; +} + +void fpu_fpe(struct pt_regs *regs) +{ + int sig, code; + unsigned int fesr; + + fesr = mfcr("cr<2, 2>"); + + sig = SIGFPE; + code = FPE_FLTUNK; + + if (fesr & FPE_ILLE) { + sig = SIGILL; + code = ILL_ILLOPC; + } else if (fesr & FPE_IDC) { + sig = SIGILL; + code = ILL_ILLOPN; + } else if (fesr & FPE_FEC) { + sig = SIGFPE; + if (fesr & FPE_IOC) + code = FPE_FLTINV; + else if (fesr & FPE_DZC) + code = FPE_FLTDIV; + else if (fesr & FPE_UFC) + code = FPE_FLTUND; + else if (fesr & FPE_OFC) + code = FPE_FLTOVF; + else if (fesr & FPE_IXC) + code = FPE_FLTRES; + } + + force_sig_fault(sig, code, (void __user *)regs->pc); +} + +#define FMFVR_FPU_REGS(vrx, vry) \ + "fmfvrl %0, "#vrx"\n" \ + "fmfvrh %1, "#vrx"\n" \ + "fmfvrl %2, "#vry"\n" 
\ + "fmfvrh %3, "#vry"\n" + +#define FMTVR_FPU_REGS(vrx, vry) \ + "fmtvrl "#vrx", %0\n" \ + "fmtvrh "#vrx", %1\n" \ + "fmtvrl "#vry", %2\n" \ + "fmtvrh "#vry", %3\n" + +#define STW_FPU_REGS(a, b, c, d) \ + "stw %0, (%4, "#a")\n" \ + "stw %1, (%4, "#b")\n" \ + "stw %2, (%4, "#c")\n" \ + "stw %3, (%4, "#d")\n" + +#define LDW_FPU_REGS(a, b, c, d) \ + "ldw %0, (%4, "#a")\n" \ + "ldw %1, (%4, "#b")\n" \ + "ldw %2, (%4, "#c")\n" \ + "ldw %3, (%4, "#d")\n" + +void save_to_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = mfcr("cr<1, 2>"); + tmp2 = mfcr("cr<2, 2>"); + + user_fp->fcr = tmp1; + user_fp->fesr = tmp2; + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vstmu.32 vr0-vr3, (%0)\n" + "vstmu.32 vr4-vr7, (%0)\n" + "vstmu.32 vr8-vr11, (%0)\n" + "vstmu.32 vr12-vr15, (%0)\n" + "fstmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fstmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + FMFVR_FPU_REGS(vr0, vr1) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr2, vr3) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr4, vr5) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr6, vr7) + STW_FPU_REGS(96, 100, 112, 116) + "addi %4, 128\n" + FMFVR_FPU_REGS(vr8, vr9) + STW_FPU_REGS(0, 4, 16, 20) + FMFVR_FPU_REGS(vr10, vr11) + STW_FPU_REGS(32, 36, 48, 52) + FMFVR_FPU_REGS(vr12, vr13) + STW_FPU_REGS(64, 68, 80, 84) + FMFVR_FPU_REGS(vr14, vr15) + STW_FPU_REGS(96, 100, 112, 116) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + + local_irq_restore(flg); +} + +void restore_from_user_fp(struct user_fp *user_fp) +{ + unsigned long flg; + unsigned long tmp1, tmp2; + unsigned long *fpregs; + + local_irq_save(flg); + + tmp1 = user_fp->fcr; + tmp2 = user_fp->fesr; + + mtcr("cr<1, 2>", tmp1); + mtcr("cr<2, 2>", tmp2); + + fpregs = &user_fp->vr[0]; +#ifdef CONFIG_CPU_HAS_FPUV2 +#ifdef CONFIG_CPU_HAS_VDSP + asm volatile( + "vldmu.32 vr0-vr3, (%0)\n" + "vldmu.32 vr4-vr7, (%0)\n" + "vldmu.32 vr8-vr11, (%0)\n" + "vldmu.32 vr12-vr15, (%0)\n" + "fldmu.64 vr16-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#else + asm volatile( + "fldmu.64 vr0-vr31, (%0)\n" + : "+a"(fpregs) + ::"memory"); +#endif +#else + { + unsigned long tmp3, tmp4; + + asm volatile( + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr0, vr1) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr2, vr3) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr4, vr5) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr6, vr7) + "addi %4, 128\n" + LDW_FPU_REGS(0, 4, 16, 20) + FMTVR_FPU_REGS(vr8, vr9) + LDW_FPU_REGS(32, 36, 48, 52) + FMTVR_FPU_REGS(vr10, vr11) + LDW_FPU_REGS(64, 68, 80, 84) + FMTVR_FPU_REGS(vr12, vr13) + LDW_FPU_REGS(96, 100, 112, 116) + FMTVR_FPU_REGS(vr14, vr15) + : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), + "=a"(tmp4), "+a"(fpregs) + ::"memory"); + } +#endif + local_irq_restore(flg); +} diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h new file mode 100644 index 000000000..a565e00c3 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_CACHEFLUSH_H +#define __ABI_CSKY_CACHEFLUSH_H + +/* Keep includes the same across arches. 
*/ +#include <linux/mm.h> + +/* + * The cache doesn't need to be flushed when TLB entries change when + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) + +#define PG_dcache_clean PG_arch_1 + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +static inline void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} + +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_page(vma, page) do { } while (0) + +#define flush_icache_range(start, end) cache_wbinv_range(start, end) + +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +void flush_icache_deferred(struct mm_struct *mm); + +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { \ + memcpy(dst, src, len); \ + if (vma->vm_flags & VM_EXEC) { \ + dcache_wb_range((unsigned long)dst, \ + (unsigned long)dst + len); \ + flush_icache_mm_range(current->mm, \ + (unsigned long)dst, \ + (unsigned long)dst + len); \ + } \ +} while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* __ABI_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h new file mode 100644 index 000000000..73ded7c72 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/ckmmu.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CKMMUV2_H +#define __ASM_CSKY_CKMMUV2_H + +#include <abi/reg_ops.h> +#include <asm/barrier.h> + +static inline int read_mmu_index(void) +{ + return mfcr("cr<0, 15>"); +} + +static inline void write_mmu_index(int value) +{ + mtcr("cr<0, 15>", value); +} + +static inline int read_mmu_entrylo0(void) +{ + return mfcr("cr<2, 15>"); +} + +static inline int read_mmu_entrylo1(void) +{ + return mfcr("cr<3, 15>"); +} + +static inline void write_mmu_pagemask(int value) +{ + mtcr("cr<6, 15>", value); +} + +static inline int read_mmu_entryhi(void) +{ + return mfcr("cr<4, 15>"); +} + +static inline void write_mmu_entryhi(int value) +{ + mtcr("cr<4, 15>", value); +} + +static inline unsigned long read_mmu_msa0(void) +{ + return mfcr("cr<30, 15>"); +} + +static inline void write_mmu_msa0(unsigned long value) +{ + mtcr("cr<30, 15>", value); +} + +static inline unsigned long read_mmu_msa1(void) +{ + return mfcr("cr<31, 15>"); +} + +static inline void write_mmu_msa1(unsigned long value) +{ + mtcr("cr<31, 15>", value); +} + +/* + * TLB operations. 
+ */ +static inline void tlb_probe(void) +{ + mtcr("cr<8, 15>", 0x80000000); +} + +static inline void tlb_read(void) +{ + mtcr("cr<8, 15>", 0x40000000); +} + +static inline void tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + asm volatile("tlbi.alls\n":::"memory"); + sync_is(); +#else + mtcr("cr<8, 15>", 0x04000000); +#endif +} + +static inline void local_tlb_invalid_all(void) +{ +#ifdef CONFIG_CPU_HAS_TLBI + asm volatile("tlbi.all\n":::"memory"); + sync_is(); +#else + tlb_invalid_all(); +#endif +} + +static inline void tlb_invalid_indexed(void) +{ + mtcr("cr<8, 15>", 0x02000000); +} + +static inline void setup_pgd(unsigned long pgd, bool kernel) +{ + if (kernel) + mtcr("cr<28, 15>", pgd | BIT(0)); + else + mtcr("cr<29, 15>", pgd | BIT(0)); +} + +static inline unsigned long get_pgd(void) +{ + return mfcr("cr<29, 15>") & ~BIT(0); +} +#endif /* __ASM_CSKY_CKMMUV2_H */ diff --git a/arch/csky/abiv2/inc/abi/elf.h b/arch/csky/abiv2/inc/abi/elf.h new file mode 100644 index 000000000..290f49ef4 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/elf.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_ELF_H +#define __ABI_CSKY_ELF_H + +/* The member sort in array pr_reg[x] is defined by GDB. */ +#define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ + pr_reg[0] = regs->pc; \ + pr_reg[1] = regs->a1; \ + pr_reg[2] = regs->a0; \ + pr_reg[3] = regs->sr; \ + pr_reg[4] = regs->a2; \ + pr_reg[5] = regs->a3; \ + pr_reg[6] = regs->regs[0]; \ + pr_reg[7] = regs->regs[1]; \ + pr_reg[8] = regs->regs[2]; \ + pr_reg[9] = regs->regs[3]; \ + pr_reg[10] = regs->regs[4]; \ + pr_reg[11] = regs->regs[5]; \ + pr_reg[12] = regs->regs[6]; \ + pr_reg[13] = regs->regs[7]; \ + pr_reg[14] = regs->regs[8]; \ + pr_reg[15] = regs->regs[9]; \ + pr_reg[16] = regs->usp; \ + pr_reg[17] = regs->lr; \ + pr_reg[18] = regs->exregs[0]; \ + pr_reg[19] = regs->exregs[1]; \ + pr_reg[20] = regs->exregs[2]; \ + pr_reg[21] = regs->exregs[3]; \ + pr_reg[22] = regs->exregs[4]; \ + pr_reg[23] = regs->exregs[5]; \ + pr_reg[24] = regs->exregs[6]; \ + pr_reg[25] = regs->exregs[7]; \ + pr_reg[26] = regs->exregs[8]; \ + pr_reg[27] = regs->exregs[9]; \ + pr_reg[28] = regs->exregs[10]; \ + pr_reg[29] = regs->exregs[11]; \ + pr_reg[30] = regs->exregs[12]; \ + pr_reg[31] = regs->exregs[13]; \ + pr_reg[32] = regs->exregs[14]; \ + pr_reg[33] = regs->tls; \ +} while (0); +#endif /* __ABI_CSKY_ELF_H */ diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h new file mode 100644 index 000000000..bedcc6f06 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
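/*
 * MCIR (cr8 on abiv1, cr<8, 15> on abiv2) command bits, summarised from
 * the constants used by both ckmmu.h files in this patch -- inferred
 * from this code, not from a reference manual; the names are
 * illustrative:
 */
#define SKETCH_MCIR_TLBP	0x80000000	/* tlb_probe() */
#define SKETCH_MCIR_TLBR	0x40000000	/* tlb_read() */
#define SKETCH_MCIR_TLBWR	0x10000000	/* "write TLB", entry.h SETUP_MMU */
#define SKETCH_MCIR_TLBINV_ALL	0x04000000	/* tlb_invalid_all() */
#define SKETCH_MCIR_TLBINV_IDX	0x02000000	/* tlb_invalid_indexed() */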
+ +#ifndef __ASM_CSKY_ENTRY_H +#define __ASM_CSKY_ENTRY_H + +#include <asm/setup.h> +#include <abi/regdef.h> + +#define LSAVE_PC 8 +#define LSAVE_PSR 12 +#define LSAVE_A0 24 +#define LSAVE_A1 28 +#define LSAVE_A2 32 +#define LSAVE_A3 36 +#define LSAVE_A4 40 +#define LSAVE_A5 44 + +#define KSPTOUSP +#define USPTOKSP + +#define usp cr<14, 1> + +.macro SAVE_ALL epc_inc + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + mfcr lr, epc + movi tls, \epc_inc + add lr, tls + stw lr, (sp, 8) + + mfcr lr, epsr + stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: + mfcr lr, usp +2: + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_ALL + ldw tls, (sp, 0) + ldw lr, (sp, 4) + ldw a0, (sp, 8) + mtcr a0, epc + ldw a0, (sp, 12) + mtcr a0, epsr + btsti a0, 31 + ldw a0, (sp, 16) + mtcr a0, usp + mtcr a0, ss0 + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 + bf 1f + mfcr sp, ss0 +1: + rte +.endm + +.macro SAVE_REGS_FTRACE + subi sp, 152 + stw tls, (sp, 0) + stw lr, (sp, 4) + + mfcr lr, psr + stw lr, (sp, 12) + + addi lr, sp, 152 + stw lr, (sp, 16) + + stw a0, (sp, 20) + stw a0, (sp, 24) + stw a1, (sp, 28) + stw a2, (sp, 32) + stw a3, (sp, 36) + + addi sp, 40 + stm r4-r13, (sp) + + addi sp, 40 + stm r16-r30, (sp) +#ifdef CONFIG_CPU_HAS_HILO + mfhi lr + stw lr, (sp, 60) + mflo lr + stw lr, (sp, 64) + mfcr lr, cr14 + stw lr, (sp, 68) +#endif + subi sp, 80 +.endm + +.macro RESTORE_REGS_FTRACE + ldw tls, (sp, 0) + +#ifdef CONFIG_CPU_HAS_HILO + ldw a0, (sp, 140) + mthi a0 + ldw a0, (sp, 144) + mtlo a0 + ldw a0, (sp, 148) + mtcr a0, cr14 +#endif + + ldw a0, (sp, 24) + ldw a1, (sp, 28) + ldw a2, (sp, 32) + ldw a3, (sp, 36) + + addi sp, 40 + ldm r4-r13, (sp) + addi sp, 40 + ldm r16-r30, (sp) + addi sp, 72 +.endm + +.macro SAVE_SWITCH_STACK + subi sp, 64 + stm r4-r11, (sp) + stw lr, (sp, 32) + stw r16, (sp, 36) + stw r17, (sp, 40) + stw r26, (sp, 44) + stw r27, (sp, 48) + stw r28, (sp, 52) + stw r29, (sp, 56) + stw r30, (sp, 60) +#ifdef CONFIG_CPU_HAS_HILO + subi sp, 16 + mfhi lr + stw lr, (sp, 0) + mflo lr + stw lr, (sp, 4) + mfcr lr, cr14 + stw lr, (sp, 8) +#endif +.endm + +.macro RESTORE_SWITCH_STACK +#ifdef CONFIG_CPU_HAS_HILO + ldw lr, (sp, 0) + mthi lr + ldw lr, (sp, 4) + mtlo lr + ldw lr, (sp, 8) + mtcr lr, cr14 + addi sp, 16 +#endif + ldm r4-r11, (sp) + ldw lr, (sp, 32) + ldw r16, (sp, 36) + ldw r17, (sp, 40) + ldw r26, (sp, 44) + ldw r27, (sp, 48) + ldw r28, (sp, 52) + ldw r29, (sp, 56) + ldw r30, (sp, 60) + addi sp, 64 +.endm + +/* MMU registers operators. 
*/ +.macro RD_MIR rx + mfcr \rx, cr<0, 15> +.endm + +.macro RD_MEH rx + mfcr \rx, cr<4, 15> +.endm + +.macro RD_MCIR rx + mfcr \rx, cr<8, 15> +.endm + +.macro RD_PGDR rx + mfcr \rx, cr<29, 15> +.endm + +.macro RD_PGDR_K rx + mfcr \rx, cr<28, 15> +.endm + +.macro WR_MEH rx + mtcr \rx, cr<4, 15> +.endm + +.macro WR_MCIR rx + mtcr \rx, cr<8, 15> +.endm + +.macro SETUP_MMU + /* Init psr and enable ee */ + lrw r6, DEFAULT_PSR_VALUE + mtcr r6, psr + psrset ee + + /* Invalid I/Dcache BTB BHT */ + movi r6, 7 + lsli r6, 16 + addi r6, (1<<4) | 3 + mtcr r6, cr17 + + /* Invalid all TLB */ + bgeni r6, 26 + mtcr r6, cr<8, 15> /* Set MCIR */ + + /* Check MMU on/off */ + mfcr r6, cr18 + btsti r6, 0 + bt 1f + + /* MMU off: setup mapping tlb entry */ + movi r6, 0 + mtcr r6, cr<6, 15> /* Set MPR with 4K page size */ + + grs r6, 1f /* Get current pa by PC */ + bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */ + andn r6, r7 + mtcr r6, cr<4, 15> /* Set MEH */ + + mov r8, r6 + movi r7, 0x00000006 + or r8, r7 + mtcr r8, cr<2, 15> /* Set MEL0 */ + movi r7, 0x00001006 + or r8, r7 + mtcr r8, cr<3, 15> /* Set MEL1 */ + + bgeni r8, 28 + mtcr r8, cr<8, 15> /* Set MCIR to write TLB */ + + br 2f +1: + /* + * MMU on: use origin MSA value from bootloader + * + * cr<30/31, 15> MSA register format: + * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 + * BA Reserved SH WA B SO SEC C D V + */ + mfcr r6, cr<30, 15> /* Get MSA0 */ +2: + lsri r6, 29 + lsli r6, 29 + addi r6, 0x1ce + mtcr r6, cr<30, 15> /* Set MSA0 */ + + movi r6, 0 + mtcr r6, cr<31, 15> /* Clr MSA1 */ + + /* enable MMU */ + mfcr r6, cr18 + bseti r6, 0 + mtcr r6, cr18 + + jmpi 3f /* jump to va */ +3: +.endm +#endif /* __ASM_CSKY_ENTRY_H */ diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h new file mode 100644 index 000000000..09e2700a3 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/fpu.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_FPU_H +#define __ASM_CSKY_FPU_H + +#include <asm/sigcontext.h> +#include <asm/ptrace.h> + +int fpu_libc_helper(struct pt_regs *regs); +void fpu_fpe(struct pt_regs *regs); + +static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); } + +void save_to_user_fp(struct user_fp *user_fp); +void restore_from_user_fp(struct user_fp *user_fp); + +/* + * Define the fesr bit for fpe handle. 
+ */ +#define FPE_ILLE (1 << 16) /* Illegal instruction */ +#define FPE_FEC (1 << 7) /* Input float-point arithmetic exception */ +#define FPE_IDC (1 << 5) /* Input denormalized exception */ +#define FPE_IXC (1 << 4) /* Inexact exception */ +#define FPE_UFC (1 << 3) /* Underflow exception */ +#define FPE_OFC (1 << 2) /* Overflow exception */ +#define FPE_DZC (1 << 1) /* Divide by zero exception */ +#define FPE_IOC (1 << 0) /* Invalid operation exception */ +#define FPE_REGULAR_EXCEPTION (FPE_IXC | FPE_UFC | FPE_OFC | FPE_DZC | FPE_IOC) + +#ifdef CONFIG_OPEN_FPU_IDE +#define IDE_STAT (1 << 5) +#else +#define IDE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IXE +#define IXE_STAT (1 << 4) +#else +#define IXE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_UFE +#define UFE_STAT (1 << 3) +#else +#define UFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_OFE +#define OFE_STAT (1 << 2) +#else +#define OFE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_DZE +#define DZE_STAT (1 << 1) +#else +#define DZE_STAT 0 +#endif + +#ifdef CONFIG_OPEN_FPU_IOE +#define IOE_STAT (1 << 0) +#else +#define IOE_STAT 0 +#endif + +#endif /* __ASM_CSKY_FPU_H */ diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h new file mode 100644 index 000000000..0a70cb553 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/page.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +static inline void clear_user_page(void *addr, unsigned long vaddr, + struct page *page) +{ + clear_page(addr); +} + +static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *page) +{ + copy_page(to, from); +} diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h new file mode 100644 index 000000000..137f7932c --- /dev/null +++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PGTABLE_BITS_H +#define __ASM_CSKY_PGTABLE_BITS_H + +/* implemented in software */ +#define _PAGE_ACCESSED (1<<7) +#define PAGE_ACCESSED_BIT (7) + +#define _PAGE_READ (1<<8) +#define _PAGE_WRITE (1<<9) +#define _PAGE_PRESENT (1<<10) + +#define _PAGE_MODIFIED (1<<11) +#define PAGE_MODIFIED_BIT (11) + +/* implemented in hardware */ +#define _PAGE_GLOBAL (1<<0) + +#define _PAGE_VALID (1<<1) +#define PAGE_VALID_BIT (1) + +#define _PAGE_DIRTY (1<<2) +#define PAGE_DIRTY_BIT (2) + +#define _PAGE_SO (1<<5) +#define _PAGE_BUF (1<<6) + +#define _PAGE_CACHE (1<<3) + +#define _CACHE_MASK _PAGE_CACHE + +#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF) +#define _CACHE_UNCACHED (_PAGE_VALID) + +#endif /* __ASM_CSKY_PGTABLE_BITS_H */ diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h new file mode 100644 index 000000000..ae82c3f26 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/reg_ops.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
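/*
 * The FESR cause bits defined in abi/fpu.h above are what fpu_fpe()
 * must translate into a signal for user space. An illustrative decode
 * -- the FPE_* to FPE_FLT* mapping shown here is an assumption for the
 * sketch, not the kernel's exact table:
 */
static int fesr_to_si_code(unsigned long fesr)
{
	if (fesr & FPE_IOC) return FPE_FLTINV;	/* invalid operation */
	if (fesr & FPE_DZC) return FPE_FLTDIV;	/* divide by zero */
	if (fesr & FPE_OFC) return FPE_FLTOVF;	/* overflow */
	if (fesr & FPE_UFC) return FPE_FLTUND;	/* underflow */
	if (fesr & FPE_IXC) return FPE_FLTRES;	/* inexact */
	return 0;
}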
+ +#ifndef __ABI_REG_OPS_H +#define __ABI_REG_OPS_H +#include <asm/reg_ops.h> + +static inline unsigned int mfcr_hint(void) +{ + return mfcr("cr31"); +} + +static inline unsigned int mfcr_ccr2(void) +{ + return mfcr("cr23"); +} +#endif /* __ABI_REG_OPS_H */ diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h new file mode 100644 index 000000000..d7328bbc1 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_REGDEF_H +#define __ASM_CSKY_REGDEF_H + +#define syscallid r7 +#define regs_syscallid(regs) regs->regs[3] +#define regs_fp(regs) regs->regs[4] + +/* + * PSR format: + * | 31 | 30-24 | 23-16 | 15 14 | 13-10 | 9 | 8-0 | + * S VEC TM MM + * + * S: Super Mode + * VEC: Exception Number + * TM: Trace Mode + * MM: Memory unaligned addr access + */ +#define DEFAULT_PSR_VALUE 0x80000200 + +#define SYSTRACE_SAVENUM 5 + +#define TRAP0_SIZE 4 + +#endif /* __ASM_CSKY_REGDEF_H */ diff --git a/arch/csky/abiv2/inc/abi/string.h b/arch/csky/abiv2/inc/abi/string.h new file mode 100644 index 000000000..f01bad2ac --- /dev/null +++ b/arch/csky/abiv2/inc/abi/string.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_STRING_H +#define __ABI_CSKY_STRING_H + +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *, const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *, const char *); + +#define __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h new file mode 100644 index 000000000..73a81245a --- /dev/null +++ b/arch/csky/abiv2/inc/abi/switch_context.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
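/*
 * Reading the PSR diagram in regdef.h above, DEFAULT_PSR_VALUE
 * (0x80000200) sets exactly two fields: S (bit 31, super mode) and MM
 * (bit 9, tolerate unaligned accesses). A sketch of the decomposition
 * -- treating S and MM as single-bit masks is an assumption drawn from
 * that comment:
 */
#define PSR_S_BIT	(1u << 31)	/* super (kernel) mode */
#define PSR_MM_BIT	(1u << 9)	/* unaligned access allowed */
/* PSR_S_BIT | PSR_MM_BIT == 0x80000200 == DEFAULT_PSR_VALUE */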
+ +#ifndef __ABI_CSKY_PTRACE_H +#define __ABI_CSKY_PTRACE_H + +struct switch_stack { +#ifdef CONFIG_CPU_HAS_HILO + unsigned long rhi; + unsigned long rlo; + unsigned long cr14; + unsigned long pad; +#endif + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r15; + unsigned long r16; + unsigned long r17; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long r29; + unsigned long r30; +}; +#endif /* __ABI_CSKY_PTRACE_H */ diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h new file mode 100644 index 000000000..b60d4a070 --- /dev/null +++ b/arch/csky/abiv2/inc/abi/vdso.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ABI_CSKY_VDSO_H +#define __ABI_CSKY_VDSO_H + +#include <linux/uaccess.h> + +static inline int setup_vdso_page(unsigned short *ptr) +{ + int err = 0; + + /* movi r7, 173 */ + err |= __put_user(0xea07, ptr); + err |= __put_user(0x008b, ptr+1); + + /* trap 0 */ + err |= __put_user(0xc000, ptr+2); + err |= __put_user(0x2020, ptr+3); + + return err; +} + +#endif /* __ABI_CSKY_STRING_H */ diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S new file mode 100644 index 000000000..d745e10c1 --- /dev/null +++ b/arch/csky/abiv2/mcount.S @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include <asm/ftrace.h> +#include <abi/entry.h> +#include <asm/asm-offsets.h> + +/* + * csky-gcc with -pg will put the following asm after prologue: + * push r15 + * jsri _mcount + * + * stack layout after mcount_enter in _mcount(): + * + * current sp => 0:+-------+ + * | a0-a3 | -> must save all argument regs + * +16:+-------+ + * | lr | -> _mcount lr (instrumente function's pc) + * +20:+-------+ + * | fp=r8 | -> instrumented function fp + * +24:+-------+ + * | plr | -> instrumented function lr (parent's pc) + * +-------+ + */ + +.macro mcount_enter + subi sp, 24 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) + stw lr, (sp, 16) + stw r8, (sp, 20) +.endm + +.macro mcount_exit + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + ldw t1, (sp, 16) + ldw r8, (sp, 20) + ldw lr, (sp, 24) + addi sp, 28 + jmp t1 +.endm + +.macro mcount_enter_regs + subi sp, 8 + stw lr, (sp, 0) + stw r8, (sp, 4) + SAVE_REGS_FTRACE +.endm + +.macro mcount_exit_regs + RESTORE_REGS_FTRACE + subi sp, 152 + ldw t1, (sp, 4) + addi sp, 152 + ldw r8, (sp, 4) + ldw lr, (sp, 8) + addi sp, 12 + jmp t1 +.endm + +.macro save_return_regs + subi sp, 16 + stw a0, (sp, 0) + stw a1, (sp, 4) + stw a2, (sp, 8) + stw a3, (sp, 12) +.endm + +.macro restore_return_regs + mov lr, a0 + ldw a0, (sp, 0) + ldw a1, (sp, 4) + ldw a2, (sp, 8) + ldw a3, (sp, 12) + addi sp, 16 +.endm + +.macro nop32_stub + nop32 + nop32 + nop32 +.endm + +ENTRY(ftrace_stub) + jmp lr +END(ftrace_stub) + +#ifndef CONFIG_DYNAMIC_FTRACE +ENTRY(_mcount) + mcount_enter + + /* r26 is link register, only used with jsri translation */ + lrw r26, ftrace_trace_function + ldw r26, (r26, 0) + lrw a1, ftrace_stub + cmpne r26, a1 + bf skip_ftrace + + mov a0, lr + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + jsr r26 + +#ifndef CONFIG_FUNCTION_GRAPH_TRACER +skip_ftrace: + mcount_exit +#else +skip_ftrace: + lrw a0, ftrace_graph_return + ldw a0, (a0, 0) + lrw a1, ftrace_stub + cmpne a0, a1 + bt ftrace_graph_caller + + 
lrw a0, ftrace_graph_entry + ldw a0, (a0, 0) + lrw a1, ftrace_graph_entry_stub + cmpne a0, a1 + bt ftrace_graph_caller + + mcount_exit +#endif +END(_mcount) +#else /* CONFIG_DYNAMIC_FTRACE */ +ENTRY(_mcount) + mov t1, lr + ldw lr, (sp, 0) + addi sp, 4 + jmp t1 +ENDPROC(_mcount) + +ENTRY(ftrace_caller) + mcount_enter + + ldw a0, (sp, 16) + subi a0, 4 + ldw a1, (sp, 24) + lrw a2, function_trace_op + ldw a2, (a2, 0) + + nop +GLOBAL(ftrace_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_call) + nop32_stub +#endif + + mcount_exit +ENDPROC(ftrace_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov a0, sp + addi a0, 24 + ldw a1, (sp, 16) + subi a1, 4 + mov a2, r8 + lrw r26, prepare_ftrace_return + jsr r26 + mcount_exit +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save_return_regs + mov a0, r8 + jsri ftrace_return_to_handler + restore_return_regs + jmp lr +END(return_to_handler) +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +ENTRY(ftrace_regs_caller) + mcount_enter_regs + + lrw t1, PT_FRAME_SIZE + add t1, sp + + ldw a0, (t1, 0) + subi a0, 4 + ldw a1, (t1, 8) + lrw a2, function_trace_op + ldw a2, (a2, 0) + mov a3, sp + + nop +GLOBAL(ftrace_regs_call) + nop32_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + nop +GLOBAL(ftrace_graph_regs_call) + nop32_stub +#endif + + mcount_exit_regs +ENDPROC(ftrace_regs_caller) +#endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/csky/abiv2/memcmp.S b/arch/csky/abiv2/memcmp.S new file mode 100644 index 000000000..bf0d809f0 --- /dev/null +++ b/arch/csky/abiv2/memcmp.S @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(memcmp) + /* Test if len less than 4 bytes. */ + mov r3, r0 + movi r0, 0 + mov r12, r4 + cmplti r2, 4 + bt .L_compare_by_byte + + andi r13, r0, 3 + movi r19, 4 + + /* Test if s1 is not 4 bytes aligned. */ + bnez r13, .L_s1_not_aligned + + LABLE_ALIGN +.L_s1_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_compare_by_word + +.L_compare_by_4word: + /* If aligned, load word each time. */ + ldw r20, (r3, 0) + ldw r21, (r1, 0) + /* If s1[i] != s2[i], goto .L_byte_check. */ + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 4) + ldw r21, (r1, 4) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 8) + ldw r21, (r1, 8) + cmpne r20, r21 + bt .L_byte_check + + ldw r20, (r3, 12) + ldw r21, (r1, 12) + cmpne r20, r21 + bt .L_byte_check + + PRE_BNEZAD (r18) + addi a3, 16 + addi a1, 16 + + BNEZAD (r18, .L_compare_by_4word) + +.L_compare_by_word: + zext r18, r2, 3, 2 + bez r18, .L_compare_by_byte +.L_compare_by_word_loop: + ldw r20, (r3, 0) + ldw r21, (r1, 0) + addi r3, 4 + PRE_BNEZAD (r18) + cmpne r20, r21 + addi r1, 4 + bt .L_byte_check + BNEZAD (r18, .L_compare_by_word_loop) + +.L_compare_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_compare_by_byte_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r18) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r18, .L_compare_by_byte_loop) + +.L_return: + mov r4, r12 + rts + +# ifdef __CSKYBE__ +/* d[i] != s[i] in word, so we check byte 0. 
*/ +.L_byte_check: + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 3 */ + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 +# else +/* s1[i] != s2[i] in word, so we check byte 3. */ +.L_byte_check: + xtrb3 r0, r20 + xtrb3 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 2 */ + xtrb2 r0, r20 + xtrb2 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 1 */ + xtrb1 r0, r20 + xtrb1 r2, r21 + subu r0, r2 + bnez r0, .L_return + + /* check byte 0 */ + xtrb0 r0, r20 + xtrb0 r2, r21 + subu r0, r2 + br .L_return +# endif /* !__CSKYBE__ */ + +/* Compare when s1 is not aligned. */ +.L_s1_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_s1_not_aligned_loop: + ldb r0, (r3, 0) + ldb r4, (r1, 0) + addi r3, 1 + subu r0, r4 + PRE_BNEZAD (r13) + addi r1, 1 + bnez r0, .L_return + BNEZAD (r13, .L_s1_not_aligned_loop) + br .L_s1_aligned +ENDPROC(memcmp) diff --git a/arch/csky/abiv2/memcpy.S b/arch/csky/abiv2/memcpy.S new file mode 100644 index 000000000..145bf3a93 --- /dev/null +++ b/arch/csky/abiv2/memcpy.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(__memcpy) +ENTRY(memcpy) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + +/* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + movi r19, 0 + + LABLE_ALIGN +.L_len_larger_16bytes: +#if defined(__CK860__) + ldw r3, (r1, 0) + stw r3, (r0, 0) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 12) + addi r1, 16 + stw r3, (r0, 12) + addi r0, 16 +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) + addi r1, 16 + addi r0, 16 +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 4 + stw r3, (r0, 0) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + +/* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + +/* + * If dest is not aligned, just copying some bytes makes the + * dest align. + */ +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 + +/* Makes the dest align. */ +.L_dest_not_aligned_loop: + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + addi r1, 1 + stb r3, (r0, 0) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + + /* Check whether the src is aligned. 
*/ + jbr .L_dest_aligned +ENDPROC(__memcpy) diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S new file mode 100644 index 000000000..5721e73ad --- /dev/null +++ b/arch/csky/abiv2/memmove.S @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + + .weak memmove +ENTRY(__memmove) +ENTRY(memmove) + subu r3, r0, r1 + cmphs r3, r2 + bt memcpy + + mov r12, r0 + addu r0, r0, r2 + addu r1, r1, r2 + + /* Test if len less than 4 bytes. */ + cmplti r2, 4 + bt .L_copy_by_byte + + andi r13, r0, 3 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. */ + bez r18, .L_len_less_16bytes + movi r19, 0 + + /* len > 16 bytes */ + LABLE_ALIGN +.L_len_larger_16bytes: + subi r1, 16 + subi r0, 16 +#if defined(__CK860__) + ldw r3, (r1, 12) + stw r3, (r0, 12) + ldw r3, (r1, 8) + stw r3, (r0, 8) + ldw r3, (r1, 4) + stw r3, (r0, 4) + ldw r3, (r1, 0) + stw r3, (r0, 0) +#else + ldw r20, (r1, 0) + ldw r21, (r1, 4) + ldw r22, (r1, 8) + ldw r23, (r1, 12) + stw r20, (r0, 0) + stw r21, (r0, 4) + stw r22, (r0, 8) + stw r23, (r0, 12) + PRE_BNEZAD (r18) +#endif + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + bez r18, .L_copy_by_byte +.L_len_less_16bytes_loop: + subi r1, 4 + subi r0, 4 + ldw r3, (r1, 0) + PRE_BNEZAD (r18) + stw r3, (r0, 0) + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_copy_by_byte: + zext r18, r2, 1, 0 + bez r18, .L_return +.L_copy_by_byte_loop: + subi r1, 1 + subi r0, 1 + ldb r3, (r1, 0) + PRE_BNEZAD (r18) + stb r3, (r0, 0) + BNEZAD (r18, .L_copy_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just copy some bytes makes the dest + align. */ +.L_dest_not_aligned: + sub r2, r13 +.L_dest_not_aligned_loop: + subi r1, 1 + subi r0, 1 + /* Makes the dest align. */ + ldb r3, (r1, 0) + PRE_BNEZAD (r13) + stb r3, (r0, 0) + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 4 + bt .L_copy_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memmove) +ENDPROC(__memmove) diff --git a/arch/csky/abiv2/memset.S b/arch/csky/abiv2/memset.S new file mode 100644 index 000000000..a7e7d994b --- /dev/null +++ b/arch/csky/abiv2/memset.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + + .weak memset +ENTRY(__memset) +ENTRY(memset) + /* Test if len less than 4 bytes. */ + mov r12, r0 + cmplti r2, 8 + bt .L_set_by_byte + + andi r13, r0, 3 + movi r19, 4 + /* Test if dest is not 4 bytes aligned. */ + bnez r13, .L_dest_not_aligned + /* Hardware can handle unaligned access directly. */ +.L_dest_aligned: + zextb r3, r1 + lsli r1, 8 + or r1, r3 + lsli r3, r1, 16 + or r3, r1 + + /* If dest is aligned, then copy. */ + zext r18, r2, 31, 4 + /* Test if len less than 16 bytes. 
*/ + bez r18, .L_len_less_16bytes + + LABLE_ALIGN +.L_len_larger_16bytes: + stw r3, (r0, 0) + stw r3, (r0, 4) + stw r3, (r0, 8) + stw r3, (r0, 12) + PRE_BNEZAD (r18) + addi r0, 16 + BNEZAD (r18, .L_len_larger_16bytes) + +.L_len_less_16bytes: + zext r18, r2, 3, 2 + andi r2, 3 + bez r18, .L_set_by_byte +.L_len_less_16bytes_loop: + stw r3, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 4 + BNEZAD (r18, .L_len_less_16bytes_loop) + + /* Test if len less than 4 bytes. */ +.L_set_by_byte: + zext r18, r2, 2, 0 + bez r18, .L_return +.L_set_by_byte_loop: + stb r1, (r0, 0) + PRE_BNEZAD (r18) + addi r0, 1 + BNEZAD (r18, .L_set_by_byte_loop) + +.L_return: + mov r0, r12 + rts + + /* If dest is not aligned, just set some bytes makes the dest + align. */ + +.L_dest_not_aligned: + sub r13, r19, r13 + sub r2, r13 +.L_dest_not_aligned_loop: + /* Makes the dest align. */ + stb r1, (r0, 0) + PRE_BNEZAD (r13) + addi r0, 1 + BNEZAD (r13, .L_dest_not_aligned_loop) + cmplti r2, 8 + bt .L_set_by_byte + /* Check whether the src is aligned. */ + jbr .L_dest_aligned +ENDPROC(memset) +ENDPROC(__memset) diff --git a/arch/csky/abiv2/strcmp.S b/arch/csky/abiv2/strcmp.S new file mode 100644 index 000000000..f8403f4d8 --- /dev/null +++ b/arch/csky/abiv2/strcmp.S @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strcmp) + mov a3, a0 + /* Check if the s1 addr is aligned. */ + xor a2, a3, a1 + andi a2, 0x3 + bnez a2, 7f + andi t1, a0, 0x3 + bnez t1, 5f + +1: + /* If aligned, load word each time. */ + ldw t0, (a3, 0) + ldw t1, (a1, 0) + /* If s1[i] != s2[i], goto 2f. */ + cmpne t0, t1 + bt 2f + /* If s1[i] == s2[i], check if s1 or s2 is at the end. */ + tstnbz t0 + /* If at the end, goto 3f (finish comparing). */ + bf 3f + + ldw t0, (a3, 4) + ldw t1, (a1, 4) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 8) + ldw t1, (a1, 8) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 12) + ldw t1, (a1, 12) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 16) + ldw t1, (a1, 16) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 20) + ldw t1, (a1, 20) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 24) + ldw t1, (a1, 24) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + ldw t0, (a3, 28) + ldw t1, (a1, 28) + cmpne t0, t1 + bt 2f + tstnbz t0 + bf 3f + + addi a3, 32 + addi a1, 32 + + br 1b + +# ifdef __CSKYBE__ + /* d[i] != s[i] in word, so we check byte 0. */ +2: + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 3 */ + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 +# else + /* s1[i] != s2[i] in word, so we check byte 3. */ +2: + xtrb3 a0, t0 + xtrb3 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 2 */ + xtrb2 a0, t0 + xtrb2 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 1 */ + xtrb1 a0, t0 + xtrb1 a2, t1 + subu a0, a2 + bez a2, 4f + bnez a0, 4f + + /* check byte 0 */ + xtrb0 a0, t0 + xtrb0 a2, t1 + subu a0, a2 + +# endif /* !__CSKYBE__ */ + jmp lr +3: + movi a0, 0 +4: + jmp lr + + /* Compare when s1 or s2 is not aligned. 
*/ +5: + subi t1, 4 +6: + ldb a0, (a3, 0) + ldb a2, (a1, 0) + subu a0, a2 + bez a2, 4b + bnez a0, 4b + addi t1, 1 + addi a1, 1 + addi a3, 1 + bnez t1, 6b + br 1b + +7: + ldb a0, (a3, 0) + addi a3, 1 + ldb a2, (a1, 0) + addi a1, 1 + subu a0, a2 + bnez a0, 4b + bnez a2, 7b + jmp r15 +ENDPROC(strcmp) diff --git a/arch/csky/abiv2/strcpy.S b/arch/csky/abiv2/strcpy.S new file mode 100644 index 000000000..3c6d3f6a5 --- /dev/null +++ b/arch/csky/abiv2/strcpy.S @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strcpy) + mov a3, a0 + /* Check if the src addr is aligned. */ + andi t0, a1, 3 + bnez t0, 11f +1: + /* Check if all the bytes in the word are not zero. */ + ldw a2, (a1) + tstnbz a2 + bf 9f + stw a2, (a3) + + ldw a2, (a1, 4) + tstnbz a2 + bf 2f + stw a2, (a3, 4) + + ldw a2, (a1, 8) + tstnbz a2 + bf 3f + stw a2, (a3, 8) + + ldw a2, (a1, 12) + tstnbz a2 + bf 4f + stw a2, (a3, 12) + + ldw a2, (a1, 16) + tstnbz a2 + bf 5f + stw a2, (a3, 16) + + ldw a2, (a1, 20) + tstnbz a2 + bf 6f + stw a2, (a3, 20) + + ldw a2, (a1, 24) + tstnbz a2 + bf 7f + stw a2, (a3, 24) + + ldw a2, (a1, 28) + tstnbz a2 + bf 8f + stw a2, (a3, 28) + + addi a3, 32 + addi a1, 32 + br 1b + + +2: + addi a3, 4 + br 9f + +3: + addi a3, 8 + br 9f + +4: + addi a3, 12 + br 9f + +5: + addi a3, 16 + br 9f + +6: + addi a3, 20 + br 9f + +7: + addi a3, 24 + br 9f + +8: + addi a3, 28 +9: +# ifdef __CSKYBE__ + xtrb0 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# else + xtrb3 t0, a2 + st.b t0, (a3) + bez t0, 10f + xtrb2 t0, a2 + st.b t0, (a3, 1) + bez t0, 10f + xtrb1 t0, a2 + st.b t0, (a3, 2) + bez t0, 10f + stw a2, (a3) +# endif /* !__CSKYBE__ */ +10: + jmp lr + +11: + subi t0, 4 +12: + ld.b a2, (a1) + st.b a2, (a3) + bez a2, 10b + addi t0, 1 + addi a1, a1, 1 + addi a3, a3, 1 + bnez t0, 12b + jbr 1b +ENDPROC(strcpy) diff --git a/arch/csky/abiv2/strksyms.c b/arch/csky/abiv2/strksyms.c new file mode 100644 index 000000000..06da723d8 --- /dev/null +++ b/arch/csky/abiv2/strksyms.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> + +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); diff --git a/arch/csky/abiv2/strlen.S b/arch/csky/abiv2/strlen.S new file mode 100644 index 000000000..bcdd70764 --- /dev/null +++ b/arch/csky/abiv2/strlen.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include "sysdep.h" + +ENTRY(strlen) + /* Check if the start addr is aligned. */ + mov r3, r0 + andi r1, r0, 3 + movi r2, 4 + movi r0, 0 + bnez r1, .L_start_not_aligned + + LABLE_ALIGN +.L_start_addr_aligned: + /* Check if all the bytes in the word are not zero. 
*/ + ldw r1, (r3) + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 4) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 8) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 12) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 16) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 20) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 24) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + ldw r1, (r3, 28) + addi r0, 4 + tstnbz r1 + bf .L_string_tail + + addi r0, 4 + addi r3, 32 + br .L_start_addr_aligned + +.L_string_tail: +# ifdef __CSKYBE__ + xtrb0 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 +# else + xtrb3 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb2 r3, r1 + bez r3, .L_return + addi r0, 1 + xtrb1 r3, r1 + bez r3, .L_return + addi r0, 1 +# endif /* !__CSKYBE__ */ + +.L_return: + rts + +.L_start_not_aligned: + sub r2, r2, r1 +.L_start_not_aligned_loop: + ldb r1, (r3) + PRE_BNEZAD (r2) + addi r3, 1 + bez r1, .L_return + addi r0, 1 + BNEZAD (r2, .L_start_not_aligned_loop) + br .L_start_addr_aligned +ENDPROC(strlen) diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h new file mode 100644 index 000000000..bbbedfd34 --- /dev/null +++ b/arch/csky/abiv2/sysdep.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __SYSDEP_H +#define __SYSDEP_H + +#ifdef __ASSEMBLER__ + +#if defined(__CK860__) +#define LABLE_ALIGN \ + .balignw 16, 0x6c03 + +#define PRE_BNEZAD(R) + +#define BNEZAD(R, L) \ + bnezad R, L +#else +#define LABLE_ALIGN \ + .balignw 8, 0x6c03 + +#define PRE_BNEZAD(R) \ + subi R, 1 + +#define BNEZAD(R, L) \ + bnez R, L +#endif + +#endif + +#endif diff --git a/arch/csky/boot/Makefile b/arch/csky/boot/Makefile new file mode 100644 index 000000000..dbc9b1bd7 --- /dev/null +++ b/arch/csky/boot/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +targets := Image zImage uImage +targets += $(dtb-y) + +$(obj)/Image: vmlinux FORCE + $(call if_changed,objcopy) + @echo ' Kernel: $@ is ready' + +compress-$(CONFIG_KERNEL_GZIP) = gzip +compress-$(CONFIG_KERNEL_LZO) = lzo +compress-$(CONFIG_KERNEL_LZMA) = lzma +compress-$(CONFIG_KERNEL_XZ) = xzkern +compress-$(CONFIG_KERNEL_LZ4) = lz4 + +$(obj)/zImage: $(obj)/Image FORCE + $(call if_changed,$(compress-y)) + @echo ' Kernel: $@ is ready' + +UIMAGE_ARCH = sandbox +UIMAGE_COMPRESSION = $(compress-y) +UIMAGE_LOADADDR = $(shell $(NM) vmlinux | awk '$$NF == "_start" {print $$1}') + +$(obj)/uImage: $(obj)/zImage + $(call if_changed,uimage) + @echo 'Image: $@ is ready' diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile new file mode 100644 index 000000000..5f1f55e91 --- /dev/null +++ b/arch/csky/boot/dts/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +dtstree := $(srctree)/$(src) + +dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig new file mode 100644 index 000000000..af722e4df --- /dev/null +++ b/arch/csky/configs/defconfig @@ -0,0 +1,53 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="csky" +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_NET=y +CONFIG_PACKET=y 
+CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_GENERIC_PHY=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRAMFS=y +CONFIG_ROMFS_FS=y +CONFIG_NFS_FS=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild new file mode 100644 index 000000000..2a5a4d94f --- /dev/null +++ b/arch/csky/include/asm/Kbuild @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += asm-offsets.h +generic-y += gpio.h +generic-y += kvm_para.h +generic-y += qrwlock.h +generic-y += seccomp.h +generic-y += user.h +generic-y += vmlinux.lds.h diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h new file mode 100644 index 000000000..d1c2ede69 --- /dev/null +++ b/arch/csky/include/asm/addrspace.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_ADDRSPACE_H +#define __ASM_CSKY_ADDRSPACE_H + +#define KSEG0 0x80000000ul +#define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0) + +#endif /* __ASM_CSKY_ADDRSPACE_H */ diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h new file mode 100644 index 000000000..ac08b0ffb --- /dev/null +++ b/arch/csky/include/asm/asid.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ASM_ASID_H +#define __ASM_ASM_ASID_H + +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <linux/cpumask.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> + +struct asid_info +{ + atomic64_t generation; + unsigned long *map; + atomic64_t __percpu *active; + u64 __percpu *reserved; + u32 bits; + /* Lock protecting the structure */ + raw_spinlock_t lock; + /* Which CPU requires context flush on next call */ + cpumask_t flush_pending; + /* Number of ASID allocated by context (shift value) */ + unsigned int ctxt_shift; + /* Callback to locally flush the context. */ + void (*flush_cpu_ctxt_cb)(void); +}; + +#define NUM_ASIDS(info) (1UL << ((info)->bits)) +#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift) + +#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu) + +void asid_new_context(struct asid_info *info, atomic64_t *pasid, + unsigned int cpu, struct mm_struct *mm); + +/* + * Check the ASID is still valid for the context. If not generate a new ASID. + * + * @pasid: Pointer to the current ASID batch + * @cpu: current CPU ID. Must have been acquired throught get_cpu() + */ +static inline void asid_check_context(struct asid_info *info, + atomic64_t *pasid, unsigned int cpu, + struct mm_struct *mm) +{ + u64 asid, old_active_asid; + + asid = atomic64_read(pasid); + + /* + * The memory ordering here is subtle. 
+ * If our active_asid is non-zero and the ASID matches the current + * generation, then we update the active_asid entry with a relaxed + * cmpxchg. Racing with a concurrent rollover means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on the + * lock. Taking the lock synchronises with the rollover and so + * we are forced to see the updated generation. + * + * - We get a valid ASID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + old_active_asid = atomic64_read(&active_asid(info, cpu)); + if (old_active_asid && + !((asid ^ atomic64_read(&info->generation)) >> info->bits) && + atomic64_cmpxchg_relaxed(&active_asid(info, cpu), + old_active_asid, asid)) + return; + + asid_new_context(info, pasid, cpu, mm); +} + +int asid_allocator_init(struct asid_info *info, + u32 bits, unsigned int asid_per_ctxt, + void (*flush_cpu_ctxt_cb)(void)); + +#endif diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h new file mode 100644 index 000000000..e369d73b1 --- /dev/null +++ b/arch/csky/include/asm/atomic.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_ATOMIC_H +#define __ASM_CSKY_ATOMIC_H + +#include <linux/version.h> +#include <asm/cmpxchg.h> +#include <asm/barrier.h> + +#ifdef CONFIG_CPU_HAS_LDSTEX + +#define __atomic_add_unless __atomic_add_unless +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + unsigned long tmp, ret; + + smp_mb(); + + asm volatile ( + "1: ldex.w %0, (%3) \n" + " mov %1, %0 \n" + " cmpne %0, %4 \n" + " bf 2f \n" + " add %0, %2 \n" + " stex.w %0, (%3) \n" + " bez %0, 1b \n" + "2: \n" + : "=&r" (tmp), "=&r" (ret) + : "r" (a), "r"(&v->counter), "r"(u) + : "memory"); + + if (ret != u) + smp_mb(); + + return ret; +} + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp; \ + \ + asm volatile ( \ + "1: ldex.w %0, (%2) \n" \ + " " #op " %0, %1 \n" \ + " stex.w %0, (%2) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret; \ + \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " " #op " %0, %2 \n" \ + " mov %1, %0 \n" \ + " stex.w %0, (%3) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + smp_mb(); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret; \ + \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %0 \n" \ + " " #op " %0, %2 \n" \ + " stex.w %0, (%3) \n" \ + " bez %0, 1b \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + smp_mb(); \ + \ + return ret; \ +} + +#else /* CONFIG_CPU_HAS_LDSTEX */ + +#include <linux/irqflags.h> + +#define __atomic_add_unless __atomic_add_unless +static inline int __atomic_add_unless(atomic_t *v, int a, int u) +{ + unsigned long tmp, ret, flags; + + raw_local_irq_save(flags); + + asm volatile ( + " ldw %0, (%3) \n" + " mov %1, %0 \n" + " cmpne %0, %4 \n" + " bf 2f \n" + " add %0, %2 \n" + " stw %0, (%3) \n" + "2: \n" + : "=&r" (tmp), "=&r" (ret) + : "r" (a), "r"(&v->counter), "r"(u) + : "memory"); + + raw_local_irq_restore(flags); + + return ret; +} + +#define 
ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%2) \n" \ + " " #op " %0, %1 \n" \ + " stw %0, (%2) \n" \ + : "=&r" (tmp) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%3) \n" \ + " " #op " %0, %2 \n" \ + " stw %0, (%3) \n" \ + " mov %1, %0 \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#define ATOMIC_FETCH_OP(op, c_op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + unsigned long tmp, ret, flags; \ + \ + raw_local_irq_save(flags); \ + \ + asm volatile ( \ + " ldw %0, (%3) \n" \ + " mov %1, %0 \n" \ + " " #op " %0, %2 \n" \ + " stw %0, (%3) \n" \ + : "=&r" (tmp), "=&r" (ret) \ + : "r" (i), "r"(&v->counter) \ + : "memory"); \ + \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_CPU_HAS_LDSTEX */ + +#define atomic_add_return atomic_add_return +ATOMIC_OP_RETURN(add, +) +#define atomic_sub_return atomic_sub_return +ATOMIC_OP_RETURN(sub, -) + +#define atomic_fetch_add atomic_fetch_add +ATOMIC_FETCH_OP(add, +) +#define atomic_fetch_sub atomic_fetch_sub +ATOMIC_FETCH_OP(sub, -) +#define atomic_fetch_and atomic_fetch_and +ATOMIC_FETCH_OP(and, &) +#define atomic_fetch_or atomic_fetch_or +ATOMIC_FETCH_OP(or, |) +#define atomic_fetch_xor atomic_fetch_xor +ATOMIC_FETCH_OP(xor, ^) + +#define atomic_and atomic_and +ATOMIC_OP(and, &) +#define atomic_or atomic_or +ATOMIC_OP(or, |) +#define atomic_xor atomic_xor +ATOMIC_OP(xor, ^) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#include <asm-generic/atomic.h> + +#endif /* __ASM_CSKY_ATOMIC_H */ diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h new file mode 100644 index 000000000..a430e7fdd --- /dev/null +++ b/arch/csky/include/asm/barrier.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
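/*
 * Both macro families above generate the same C interface; only the
 * loop body differs (ldex.w/stex.w retry versus IRQ masking). A hand
 * expansion of ATOMIC_OP_RETURN(add, +) for the ldex/stex variant,
 * shown purely for illustration:
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp, ret;

	smp_mb();
	asm volatile (
	"1:	ldex.w		%0, (%3) \n"
	"	add		%0, %2   \n"
	"	mov		%1, %0   \n"
	"	stex.w		%0, (%3) \n"
	"	bez		%0, 1b   \n"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (i), "r"(&v->counter)
		: "memory");
	smp_mb();

	return ret;
}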
+
+#ifndef __ASM_CSKY_BARRIER_H
+#define __ASM_CSKY_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() asm volatile ("nop\n":::"memory")
+
+/*
+ * sync: completion barrier, all sync.xx instructions
+ * guarantee the last response received for bus transactions
+ * made by ld/st instructions before sync.s
+ * sync.s: inherit from sync, but also shareable to other cores
+ * sync.i: inherit from sync, but also flush cpu pipeline
+ * sync.is: the same as sync.i + sync.s
+ *
+ * bar.brwarw: ordering barrier for all load/store instructions before it
+ * bar.brwarws: ordering barrier for all load/store instructions before it
+ * and shareable to other cores
+ * bar.brar: ordering barrier for all load instructions before it
+ * bar.brars: ordering barrier for all load instructions before it
+ * and shareable to other cores
+ * bar.bwaw: ordering barrier for all store instructions before it
+ * bar.bwaws: ordering barrier for all store instructions before it
+ * and shareable to other cores
+ */
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define mb() asm volatile ("sync.s\n":::"memory")
+
+#ifdef CONFIG_SMP
+#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
+#define __smp_rmb() asm volatile ("bar.brars\n":::"memory")
+#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory")
+#endif /* CONFIG_SMP */
+
+#define sync_is() asm volatile ("sync.is\n":::"memory")
+
+#else /* !CONFIG_CPU_HAS_CACHEV2 */
+#define mb() asm volatile ("sync\n":::"memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_BARRIER_H */
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
new file mode 100644
index 000000000..43b9838bf
--- /dev/null
+++ b/arch/csky/include/asm/bitops.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BITOPS_H
+#define __ASM_CSKY_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * asm-generic/bitops/ffs.h
+ */
+static inline int ffs(int x)
+{
+	if (!x)
+		return 0;
+
+	asm volatile (
+		"brev %0\n"
+		"ff1  %0\n"
+		"addi %0, 1\n"
+		: "=&r"(x)
+		: "0"(x));
+	return x;
+}
+
+/*
+ * asm-generic/bitops/__ffs.h
+ */
+static __always_inline unsigned long __ffs(unsigned long x)
+{
+	asm volatile (
+		"brev %0\n"
+		"ff1  %0\n"
+		: "=&r"(x)
+		: "0"(x));
+	return x;
+}
+
+/*
+ * asm-generic/bitops/fls.h
+ */
+static __always_inline int fls(unsigned int x)
+{
+	asm volatile(
+		"ff1 %0\n"
+		: "=&r"(x)
+		: "0"(x));
+
+	return (32 - x);
+}
+
+/*
+ * asm-generic/bitops/__fls.h
+ */
+static __always_inline unsigned long __fls(unsigned long x)
+{
+	return fls(x) - 1;
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+
+/*
+ * Bug workaround: the plain (non-atomic) __clear_bit() is not safe
+ * here, so it is aliased to the atomic clear_bit() below.
+ */ +#include <asm-generic/bitops/non-atomic.h> +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr) + +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> +#endif /* __ASM_CSKY_BITOPS_H */ diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h new file mode 100644 index 000000000..33ebd16b9 --- /dev/null +++ b/arch/csky/include/asm/bug.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_BUG_H +#define __ASM_CSKY_BUG_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> + +#define BUG() \ +do { \ + asm volatile ("bkpt\n"); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG + +#include <asm-generic/bug.h> + +struct pt_regs; + +void die(struct pt_regs *regs, const char *str); +void show_regs(struct pt_regs *regs); +void show_code(struct pt_regs *regs); + +#endif /* __ASM_CSKY_BUG_H */ diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h new file mode 100644 index 000000000..4b5c09bf1 --- /dev/null +++ b/arch/csky/include/asm/cache.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CACHE_H +#define __ASM_CSKY_CACHE_H + +/* bytes per L1 cache line */ +#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#ifndef __ASSEMBLY__ + +void dcache_wb_line(unsigned long start); + +void icache_inv_range(unsigned long start, unsigned long end); +void icache_inv_all(void); +void local_icache_inv_all(void *priv); + +void dcache_wb_range(unsigned long start, unsigned long end); +void dcache_wbinv_all(void); + +void cache_wbinv_range(unsigned long start, unsigned long end); +void cache_wbinv_all(void); + +void dma_wbinv_range(unsigned long start, unsigned long end); +void dma_inv_range(unsigned long start, unsigned long end); +void dma_wb_range(unsigned long start, unsigned long end); + +#endif +#endif /* __ASM_CSKY_CACHE_H */ diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h new file mode 100644 index 000000000..f0b8f2542 --- /dev/null +++ b/arch/csky/include/asm/cacheflush.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_CACHEFLUSH_H +#define __ASM_CSKY_CACHEFLUSH_H + +#include <linux/mm.h> +#include <abi/cacheflush.h> + +#endif /* __ASM_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h new file mode 100644 index 000000000..768582429 --- /dev/null +++ b/arch/csky/include/asm/checksum.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
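/*
 * cache.h above splits maintenance into icache, dcache and DMA range
 * helpers. A hedged sketch of the usual streaming-DMA pattern built on
 * them -- the buffer and length are hypothetical, and real drivers go
 * through the DMA-mapping API rather than calling these directly:
 */
static void cpu_to_device(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	dma_wb_range(start, start + len);	/* write dirty lines back */
}

static void device_to_cpu(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	dma_inv_range(start, start + len);	/* drop stale lines */
}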
+ +#ifndef __ASM_CSKY_CHECKSUM_H +#define __ASM_CSKY_CHECKSUM_H + +#include <linux/in6.h> +#include <asm/byteorder.h> + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 tmp; + + asm volatile( + "mov %1, %0\n" + "rori %0, 16\n" + "addu %0, %1\n" + "lsri %0, 16\n" + : "=r"(csum), "=r"(tmp) + : "0"(csum)); + + return (__force __sum16) ~csum; +} +#define csum_fold csum_fold + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, unsigned short proto, __wsum sum) +{ + asm volatile( + "clrc\n" + "addc %0, %1\n" + "addc %0, %2\n" + "addc %0, %3\n" + "inct %0\n" + : "=r"(sum) + : "r"((__force u32)saddr), "r"((__force u32)daddr), +#ifdef __BIG_ENDIAN + "r"(proto + len), +#else + "r"((proto + len) << 8), +#endif + "0" ((__force unsigned long)sum) + : "cc"); + return sum; +} +#define csum_tcpudp_nofold csum_tcpudp_nofold + +#include <asm-generic/checksum.h> + +#endif /* __ASM_CSKY_CHECKSUM_H */ diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h new file mode 100644 index 000000000..89224530a --- /dev/null +++ b/arch/csky/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_CMPXCHG_H +#define __ASM_CSKY_CMPXCHG_H + +#ifdef CONFIG_CPU_HAS_LDSTEX +#include <asm/barrier.h> + +extern void __bad_xchg(void); + +#define __xchg(new, ptr, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + unsigned long tmp; \ + switch (size) { \ + case 4: \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + : "=&r" (__ret), "=&r" (tmp) \ + : "r" (__new), "r"(__ptr) \ + :); \ + smp_mb(); \ + break; \ + default: \ + __bad_xchg(); \ + } \ + __ret; \ +}) + +#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)))) + +#define __cmpxchg(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(new) __tmp; \ + __typeof__(old) __old = (old); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + smp_mb(); \ + asm volatile ( \ + "1: ldex.w %0, (%3) \n" \ + " cmpne %0, %4 \n" \ + " bt 2f \n" \ + " mov %1, %2 \n" \ + " stex.w %1, (%3) \n" \ + " bez %1, 1b \n" \ + "2: \n" \ + : "=&r" (__ret), "=&r" (__tmp) \ + : "r" (__new), "r"(__ptr), "r"(__old) \ + :); \ + smp_mb(); \ + break; \ + default: \ + __bad_xchg(); \ + } \ + __ret; \ +}) + +#define cmpxchg(ptr, o, n) \ + (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))) +#else +#include <asm-generic/cmpxchg.h> +#endif + +#endif /* __ASM_CSKY_CMPXCHG_H */ diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h new file mode 100644 index 000000000..e1ec55827 --- /dev/null +++ b/arch/csky/include/asm/elf.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
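/*
 * The 32-bit cmpxchg() defined in cmpxchg.h above is the usual
 * building block for lock-free read-modify-write loops. A minimal
 * sketch (the saturating counter is hypothetical):
 */
static int saturating_inc(int *p, int max)
{
	int old, new;

	do {
		old = READ_ONCE(*p);
		if (old == max)
			return old;		/* already saturated */
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);

	return new;
}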
+ +#ifndef __ASM_CSKY_ELF_H +#define __ASM_CSKY_ELF_H + +#include <asm/ptrace.h> +#include <abi/regdef.h> + +#define ELF_ARCH EM_CSKY +#define EM_CSKY_OLD 39 + +/* CSKY Relocations */ +#define R_CSKY_NONE 0 +#define R_CSKY_32 1 +#define R_CSKY_PCIMM8BY4 2 +#define R_CSKY_PCIMM11BY2 3 +#define R_CSKY_PCIMM4BY2 4 +#define R_CSKY_PC32 5 +#define R_CSKY_PCRELJSR_IMM11BY2 6 +#define R_CSKY_GNU_VTINHERIT 7 +#define R_CSKY_GNU_VTENTRY 8 +#define R_CSKY_RELATIVE 9 +#define R_CSKY_COPY 10 +#define R_CSKY_GLOB_DAT 11 +#define R_CSKY_JUMP_SLOT 12 +#define R_CSKY_ADDR_HI16 24 +#define R_CSKY_ADDR_LO16 25 +#define R_CSKY_PCRELJSR_IMM26BY2 40 + +typedef unsigned long elf_greg_t; + +typedef struct user_fp elf_fpregset_t; + +/* + * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() use fixed size of + * elf_prstatus. It's 148 for abiv1 and 220 for abiv2, the size is enough + * for coredump and no need full sizeof(struct pt_regs). + */ +#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2) + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \ + ((x)->e_machine == EM_CSKY_OLD)) + +/* + * These are used to set parameters in the core dumps. + */ +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 +#define ELF_CLASS ELFCLASS32 +#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; } + +#ifdef __cskyBE__ +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB +#endif + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +#define ELF_ET_DYN_BASE 0x0UL +#include <abi/elf.h> + +/* Similar, but for a thread other than current. */ +struct task_struct; +extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs); +#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) + +#define ELF_HWCAP (0) + +/* + * This yields a string that ld.so will use to load implementation specific + * libraries for optimization. This is more specific in intent than poking + * at uname or /proc/cpuinfo. + */ +#define ELF_PLATFORM (NULL) +#define SET_PERSONALITY(ex) set_personality(PER_LINUX) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#endif /* __ASM_CSKY_ELF_H */ diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h new file mode 100644 index 000000000..81f9477d5 --- /dev/null +++ b/arch/csky/include/asm/fixmap.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
+ +#ifndef __ASM_CSKY_FIXMAP_H +#define __ASM_CSKY_FIXMAP_H + +#include <asm/page.h> +#include <asm/memory.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +enum fixed_addresses { +#ifdef CONFIG_HAVE_TCM + FIX_TCM = TCM_NR_PAGES, +#endif +#ifdef CONFIG_HIGHMEM + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, +#endif + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#include <asm-generic/fixmap.h> + +extern void fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base); +extern void __init fixaddr_init(void); + +#endif /* __ASM_CSKY_FIXMAP_H */ diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h new file mode 100644 index 000000000..fae72b0b1 --- /dev/null +++ b/arch/csky/include/asm/ftrace.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_FTRACE_H +#define __ASM_CSKY_FTRACE_H + +#define MCOUNT_INSN_SIZE 14 + +#define HAVE_FUNCTION_GRAPH_FP_TEST + +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#define MCOUNT_ADDR ((unsigned long)_mcount) + +#ifndef __ASSEMBLY__ + +extern void _mcount(unsigned long); + +extern void ftrace_graph_call(void); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { +}; +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_CSKY_FTRACE_H */ diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h new file mode 100644 index 000000000..14645e3d5 --- /dev/null +++ b/arch/csky/include/asm/highmem.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_HIGHMEM_H +#define __ASM_CSKY_HIGHMEM_H + +#ifdef __KERNEL__ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <asm/kmap_types.h> +#include <asm/cache.h> + +/* undef for production */ +#define HIGHMEM_DEBUG 1 + +/* declarations for highmem.c */ +extern unsigned long highstart_pfn, highend_pfn; + +extern pte_t *pkmap_page_table; + +/* + * Right now we initialize only a single pte table. It can be extended + * easily, subsequent pte tables have to be allocated in one physical + * chunk of RAM. + */ +#define LAST_PKMAP 1024 +#define LAST_PKMAP_MASK (LAST_PKMAP-1) +#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +#define ARCH_HAS_KMAP_FLUSH_TLB +extern void kmap_flush_tlb(unsigned long addr); +extern void *kmap_atomic_pfn(unsigned long pfn); + +#define flush_cache_kmaps() do {} while (0) + +extern void kmap_init(void); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_CSKY_HIGHMEM_H */ diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h new file mode 100644 index 000000000..e909587f2 --- /dev/null +++ b/arch/csky/include/asm/io.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_IO_H +#define __ASM_CSKY_IO_H + +#include <linux/pgtable.h> +#include <linux/types.h> +#include <linux/version.h> + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. 
+ * + * For CACHEV1 (807, 810), store instruction could fast retire, so we need + * another mb() to prevent st fast retire. + * + * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't + * fast retire. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) + +#ifdef CONFIG_CPU_HAS_CACHEV2 +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) +#else +#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) +#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) +#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) +#endif + +/* + * I/O memory mapping functions. + */ +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), \ + (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED) + +#include <asm-generic/io.h> + +#endif /* __ASM_CSKY_IO_H */ diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h new file mode 100644 index 000000000..33aaf39d6 --- /dev/null +++ b/arch/csky/include/asm/irq_work.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IRQ_WORK_H +#define __ASM_CSKY_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return true; +} +extern void arch_irq_work_raise(void); +#endif /* __ASM_CSKY_IRQ_WORK_H */ diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h new file mode 100644 index 000000000..9e3a569a5 --- /dev/null +++ b/arch/csky/include/asm/irqflags.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_IRQFLAGS_H +#define __ASM_CSKY_IRQFLAGS_H +#include <abi/reg_ops.h> + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = mfcr("psr"); + asm volatile("psrclr ie\n":::"memory"); + return flags; +} +#define arch_local_irq_save arch_local_irq_save + +static inline void arch_local_irq_enable(void) +{ + asm volatile("psrset ee, ie\n":::"memory"); +} +#define arch_local_irq_enable arch_local_irq_enable + +static inline void arch_local_irq_disable(void) +{ + asm volatile("psrclr ie\n":::"memory"); +} +#define arch_local_irq_disable arch_local_irq_disable + +static inline unsigned long arch_local_save_flags(void) +{ + return mfcr("psr"); +} +#define arch_local_save_flags arch_local_save_flags + +static inline void arch_local_irq_restore(unsigned long flags) +{ + mtcr("psr", flags); +} +#define arch_local_irq_restore arch_local_irq_restore + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1<<6)); +} +#define arch_irqs_disabled_flags arch_irqs_disabled_flags + +#include <asm-generic/irqflags.h> + +#endif /* __ASM_CSKY_IRQFLAGS_H */ diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h new file mode 100644 index 000000000..b647bbde4 --- /dev/null +++ b/arch/csky/include/asm/kprobes.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_KPROBES_H +#define __ASM_CSKY_KPROBES_H + +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 1 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include <asm/probes.h> + +struct prev_kprobe { + struct 
kprobe *kp; + unsigned int status; +}; + +/* Single step context for kprobe */ +struct kprobe_step_ctx { + unsigned long ss_pending; + unsigned long match_addr; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_sr; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; +}; + +void arch_remove_kprobe(struct kprobe *p); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); +int kprobe_breakpoint_handler(struct pt_regs *regs); +int kprobe_single_step_handler(struct pt_regs *regs); +void kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ +#endif /* __ASM_CSKY_KPROBES_H */ diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h new file mode 100644 index 000000000..a65c6759f --- /dev/null +++ b/arch/csky/include/asm/memory.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MEMORY_H +#define __ASM_CSKY_MEMORY_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> +#include <linux/sizes.h> + +#define FIXADDR_TOP _AC(0xffffc000, UL) +#define PKMAP_BASE _AC(0xff800000, UL) +#define VMALLOC_START _AC(0xc0008000, UL) +#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) + +#ifdef CONFIG_HAVE_TCM +#ifdef CONFIG_HAVE_DTCM +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) +#else +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) +#endif +#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) +#endif + +#endif diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h new file mode 100644 index 000000000..26fbb1d15 --- /dev/null +++ b/arch/csky/include/asm/mmu.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_MMU_H +#define __ASM_CSKY_MMU_H + +typedef struct { + atomic64_t asid; + void *vdso; + cpumask_t icache_stale_mask; +} mm_context_t; + +#endif /* __ASM_CSKY_MMU_H */ diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h new file mode 100644 index 000000000..abdf1f1cb --- /dev/null +++ b/arch/csky/include/asm/mmu_context.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
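The asm/memory.h constants above pin the top of the 32-bit virtual layout: vmalloc space sits just above the 0x80000000 kernel base, the pkmap window for highmem sits above that, and the fixmap region ends at FIXADDR_TOP. A rough standalone C sketch, not part of this diff, with values mirroring the header (LAST_PKMAP comes from asm/highmem.h earlier in the series):

#include <stdio.h>

#define PAGE_SIZE      0x1000UL
#define LAST_PKMAP     1024UL                 /* from asm/highmem.h */
#define FIXADDR_TOP    0xffffc000UL
#define PKMAP_BASE     0xff800000UL
#define VMALLOC_START  0xc0008000UL
#define VMALLOC_END    (PKMAP_BASE - (PAGE_SIZE * 2))

int main(void)
{
	/* the three regions must nest without overlap */
	printf("vmalloc: %#lx - %#lx\n", VMALLOC_START, VMALLOC_END);
	printf("pkmap:   %#lx - %#lx\n", PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE);
	printf("fixmap ends at %#lx\n", FIXADDR_TOP);
	return 0;
}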
+
+#ifndef __ASM_CSKY_MMU_CONTEXT_H
+#define __ASM_CSKY_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <abi/ckmmu.h>
+
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ setup_pgd(__pa(pgd), false)
+
+#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
+ setup_pgd(__pa(pgd), true)
+
+#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1)
+#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
+
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
+#define activate_mm(prev,next) switch_mm(prev, next, current)
+
+#define destroy_context(mm) do {} while (0)
+#define enter_lazy_tlb(mm, tsk) do {} while (0)
+#define deactivate_mm(tsk, mm) do {} while (0)
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (prev != next)
+ check_and_switch_context(next, cpu);
+
+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+ write_mmu_entryhi(next->context.asid.counter);
+
+ flush_icache_deferred(next);
+}
+#endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
new file mode 100644
index 000000000..16878240e
--- /dev/null
+++ b/arch/csky/include/asm/page.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PAGE_H
+#define __ASM_CSKY_PAGE_H
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size: 4KB
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define THREAD_SIZE (PAGE_SIZE * 2)
+#define THREAD_MASK (~(THREAD_SIZE - 1))
+#define THREAD_SHIFT (PAGE_SHIFT + 1)
+
+
+/*
+ * For C-SKY, "User-space:Kernel-space" is fixed at "2GB:2GB" by hardware, and
+ * there are two segment registers (MSA0 + MSA1) that map 512MB + 512MB of
+ * physical address space. We use them for the kernel's 1GB direct-map area;
+ * for more than 1GB of memory we use highmem.
+ */ +#define PAGE_OFFSET 0x80000000 +#define SSEG_SIZE 0x20000000 +#define LOWMEM_LIMIT (SSEG_SIZE * 2) + +#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) + +#ifndef __ASSEMBLY__ + +#include <linux/pfn.h> + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \ + (void *)(kaddr) < high_memory) +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + +extern void *memset(void *dest, int c, size_t l); +extern void *memcpy(void *to, const void *from, size_t l); + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr))) + +struct page; + +#include <abi/page.h> + +struct vm_area_struct; + +typedef struct { unsigned long pte_low; } pte_t; +#define pte_val(x) ((x).pte_low) + +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t))) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +extern unsigned long va_pa_offset; + +#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET) + +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset)) + +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \ + PHYS_OFFSET_OFFSET) +#define virt_to_page(x) (mem_map + MAP_NR(x)) + +#define pfn_to_kaddr(x) __va(PFN_PHYS(x)) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_CSKY_PAGE_H */ diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h new file mode 100644 index 000000000..ebc765b1f --- /dev/null +++ b/arch/csky/include/asm/pci.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_PCI_H +#define __ASM_CSKY_PCI_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> + +#include <asm/io.h> + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +/* C-SKY shim does not initialize PCI bus */ +#define pcibios_assign_all_busses() 1 + +extern int isa_dma_bridge_buggy; + +#ifdef CONFIG_PCI +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + /* no legacy IRQ on csky */ + return -ENODEV; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + /* always show the domain in /proc */ + return 1; +} +#endif /* CONFIG_PCI */ + +#endif /* __ASM_CSKY_PCI_H */ diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h new file mode 100644 index 000000000..572093e11 --- /dev/null +++ b/arch/csky/include/asm/perf_event.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
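page.h above defines the linear-map translation: __pa() subtracts PAGE_OFFSET and adds va_pa_offset, and __va() inverts it. A minimal user-space sketch of the same arithmetic, not part of this diff (the va_pa_offset value is invented; the real one is computed at boot):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0x80000000UL
static unsigned long va_pa_offset; /* hypothetical: 0 here, derived from the DRAM base on real hardware */

static unsigned long pa_of(unsigned long va) { return va - PAGE_OFFSET + va_pa_offset; }
static unsigned long va_of(unsigned long pa) { return pa + PAGE_OFFSET - va_pa_offset; }

int main(void)
{
	unsigned long kaddr = 0x80100000UL;

	assert(va_of(pa_of(kaddr)) == kaddr); /* __pa()/__va() are exact inverses */
	printf("%#lx -> phys %#lx\n", kaddr, pa_of(kaddr));
	return 0;
}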
+
+#ifndef __ASM_CSKY_PERF_EVENT_H
+#define __ASM_CSKY_PERF_EVENT_H
+
+#include <abi/regdef.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
+ asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
+}
+
+#endif /* __ASM_CSKY_PERF_EVENT_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
new file mode 100644
index 000000000..d58d8146b
--- /dev/null
+++ b/arch/csky/include/asm/pgalloc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PGALLOC_H
+#define __ASM_CSKY_PGALLOC_H
+
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd(__pa(pte)));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd(__pa(page_address(pte))));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+extern void pgd_init(unsigned long *p);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte;
+ unsigned long i;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+ (pte + i)->pte_low = _PAGE_GLOBAL;
+
+ return pte;
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret;
+ pgd_t *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long *)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* prevent out-of-order execution */
+ smp_mb();
+#ifdef CONFIG_CPU_NEED_TLBSYNC
+ dcache_wb_range((unsigned int)ret,
+ (unsigned int)(ret + PTRS_PER_PGD));
+#endif
+ }
+
+ return ret;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page(tlb, pte); \
+} while (0)
+
+extern void pagetable_init(void);
+extern void pre_mmu_init(void);
+extern void pre_trap_init(void);
+
+#endif /* __ASM_CSKY_PGALLOC_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
new file mode 100644
index 000000000..2002cb7f1
--- /dev/null
+++ b/arch/csky/include/asm/pgtable.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
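pgd_alloc() above builds each new page directory in two halves: pgd_init() fills the user slots, then the kernel half is memcpy'd from init_mm so every process shares one set of kernel mappings. A simplified model of that split, not part of this diff (geometry matches the PGDIR_SHIFT/PTRS_PER_PGD values defined just below in pgtable.h):

#include <string.h>

#define PTRS_PER_PGD      1024                            /* 4 KiB PGD / 4-byte entries */
#define USER_PTRS_PER_PGD (0x80000000UL / (1UL << 22))    /* = 512, the low half is user space */

static unsigned long init_pgd[PTRS_PER_PGD];              /* stand-in for init_mm's PGD */

static void model_pgd_alloc(unsigned long *new_pgd)
{
	/* user half: empty (the real code points entries at invalid_pte_table) */
	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(*new_pgd));
	/* kernel half: copied from init_mm, exactly like the memcpy in pgd_alloc() */
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(*new_pgd));
}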
+
+#ifndef __ASM_CSKY_PGTABLE_H
+#define __ASM_CSKY_PGTABLE_H
+
+#include <asm/fixmap.h>
+#include <asm/memory.h>
+#include <asm/addrspace.h>
+#include <abi/pgtable-bits.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+/*
+ * C-SKY has a two-level paging structure:
+ */
+#define PGD_ORDER 0
+#define PTE_ORDER 0
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pte_clear(mm, addr, ptep) set_pte((ptep), \
+ (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot))
+
+#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
+#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
+ _CACHE_MASK)
+
+#define __swp_type(x) (((x).val >> 4) & 0xff)
+#define __swp_offset(x) ((x).val >> 12)
+#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
+ ((offset) << 12) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \
+ pgprot_val(pgprot))
+
+/*
+ * C-SKY cannot do separate page protection for execute, and treats it the
+ * same as read. Also, write permissions imply read permissions. This is the
+ * closest we can get by reasonable means.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _CACHE_CACHED)
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+
+#define _PAGE_IOREMAP \
+ (_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
+ _CACHE_UNCACHED | _PAGE_SO)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern void load_pgd(unsigned long pg_dir);
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+static inline void set_pte(pte_t *p, pte_t pte)
+{
+ *p = pte;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent out-of-order execution */
+ smp_mb();
+}
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long ptr;
+
+ ptr = pmd_val(pmd);
+
+ return __va(ptr);
+}
+
+#define pmd_phys(pmd) pmd_val(pmd)
+
+static inline void set_pmd(pmd_t *p, pmd_t pmd)
+{
+ *p = pmd;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent speculative execution */
+ smp_mb();
+}
+
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == __pa(invalid_pte_table);
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) != __pa(invalid_pte_table));
+}
+
+static inline void pmd_clear(pmd_t *p)
+{
+ pmd_val(*p) = (__pa(invalid_pte_table));
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not.
+ */
+static inline int pte_read(pte_t pte)
+{
+ return pte.pte_low & _PAGE_READ;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_WRITE;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_MODIFIED;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_ACCESSED;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_MODIFIED;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) |= _PAGE_VALID;
+ return pte;
+}
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+ return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | + (pgprot_val(newprot))); +} + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern void paging_init(void); + +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte); + +/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +#define kern_addr_valid(addr) (1) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +#endif /* __ASM_CSKY_PGTABLE_H */ diff --git a/arch/csky/include/asm/probes.h b/arch/csky/include/asm/probes.h new file mode 100644 index 000000000..5e526334e --- /dev/null +++ b/arch/csky/include/asm/probes.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_PROBES_H +#define __ASM_CSKY_PROBES_H + +typedef u32 probe_opcode_t; +typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + /* restore address after simulation */ + unsigned long restore; +}; + +#ifdef CONFIG_KPROBES +typedef u32 kprobe_opcode_t; +struct arch_specific_insn { + struct arch_probe_insn api; +}; +#endif + +#endif /* __ASM_CSKY_PROBES_H */ diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h new file mode 100644 index 000000000..4800f6563 --- /dev/null +++ b/arch/csky/include/asm/processor.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PROCESSOR_H +#define __ASM_CSKY_PROCESSOR_H + +#include <linux/bitops.h> +#include <asm/segment.h> +#include <asm/ptrace.h> +#include <asm/current.h> +#include <asm/cache.h> +#include <abi/reg_ops.h> +#include <abi/regdef.h> +#include <abi/switch_context.h> +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +struct cpuinfo_csky { + unsigned long asid_cache; +} __aligned(SMP_CACHE_BYTES); + +extern struct cpuinfo_csky cpu_data[]; + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. TASK_SIZE + * for a 64 bit kernel expandable to 8192EB, of which the current CSKY + * implementations will "only" be able to use 1TB ... + */ +#define TASK_SIZE 0x7fff8000UL + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP +#endif + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) + +struct thread_struct { + unsigned long sp; /* kernel stack pointer */ + unsigned long trap_no; /* saved status register */ + + /* FPU regs */ + struct user_fp __aligned(16) user_fp; +}; + +#define INIT_THREAD { \ + .sp = sizeof(init_stack) + (unsigned long) &init_stack, \ +} + +/* + * Do necessary setup to start up a newly executed thread. + * + * pass the data segment into user programs if it exists, + * it can't hurt anything as far as I can tell + */ +#define start_thread(_regs, _pc, _usp) \ +do { \ + set_fs(USER_DS); /* reads from user space */ \ + (_regs)->pc = (_pc); \ + (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \ + (_regs)->regs[2] = 0; \ + (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? 
*/ \ + (_regs)->sr &= ~PS_S; \ + (_regs)->usp = (_usp); \ +} while (0) + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) + +#define task_pt_regs(p) \ + ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) + +#define cpu_relax() barrier() + +#endif /* __ASM_CSKY_PROCESSOR_H */ diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h new file mode 100644 index 000000000..91ceb1b45 --- /dev/null +++ b/arch/csky/include/asm/ptrace.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_PTRACE_H +#define __ASM_CSKY_PTRACE_H + +#include <uapi/asm/ptrace.h> +#include <asm/traps.h> +#include <linux/types.h> +#include <linux/compiler.h> + +#ifndef __ASSEMBLY__ + +#define PS_S 0x80000000 /* Supervisor Mode */ + +#define USR_BKPT 0x1464 + +#define arch_has_single_step() (1) +#define current_pt_regs() \ +({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; }) + +#define user_stack_pointer(regs) ((regs)->usp) + +#define user_mode(regs) (!((regs)->sr & PS_S)) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define trap_no(regs) ((regs->sr >> 16) & 0xff) + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->pc = val; +} + +#if defined(__CSKYABIV2__) +#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr) +#else +#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9]) +#endif + +static inline bool in_syscall(struct pt_regs const *regs) +{ + return ((regs->sr >> 16) & 0xff) == VEC_TRAP0; +} + +static inline void forget_syscall(struct pt_regs *regs) +{ + regs->sr &= ~(0xff << 16); +} + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void regs_set_return_value(struct pt_regs *regs, + unsigned long val) +{ + regs->a0 = val; +} + +/* Valid only for Kernel mode traps. */ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->usp; +} + +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return regs->regs[4]; +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->regs[4] = val; +} + +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/* + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
+ */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_CSKY_PTRACE_H */ diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h new file mode 100644 index 000000000..cccf7d525 --- /dev/null +++ b/arch/csky/include/asm/reg_ops.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_REGS_OPS_H +#define __ASM_REGS_OPS_H + +#define mfcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile( \ + "mfcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define mtcr(reg, val) \ +({ \ + asm volatile( \ + "mtcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#endif /* __ASM_REGS_OPS_H */ diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h new file mode 100644 index 000000000..79ede9b1a --- /dev/null +++ b/arch/csky/include/asm/segment.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SEGMENT_H +#define __ASM_CSKY_SEGMENT_H + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) + +#define USER_DS ((mm_segment_t) { 0x80000000UL }) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) + +#endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h new file mode 100644 index 000000000..efafe4c79 --- /dev/null +++ b/arch/csky/include/asm/shmparam.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SHMPARAM_H +#define __ASM_CSKY_SHMPARAM_H + +#define SHMLBA (4 * PAGE_SIZE) + +#define __ARCH_FORCE_SHMLBA + +#endif /* __ASM_CSKY_SHMPARAM_H */ diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h new file mode 100644 index 000000000..668b79ce2 --- /dev/null +++ b/arch/csky/include/asm/smp.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SMP_H +#define __ASM_CSKY_SMP_H + +#include <linux/cpumask.h> +#include <linux/irqreturn.h> +#include <linux/threads.h> + +#ifdef CONFIG_SMP + +void __init setup_smp(void); + +void __init setup_smp_ipi(void); + +void arch_send_call_function_ipi_mask(struct cpumask *mask); + +void arch_send_call_function_single_ipi(int cpu); + +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq); + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +int __cpu_disable(void); + +void __cpu_die(unsigned int cpu); + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_CSKY_SMP_H */ diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h new file mode 100644 index 000000000..7cf3f2b34 --- /dev/null +++ b/arch/csky/include/asm/spinlock.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_H +#define __ASM_CSKY_SPINLOCK_H + +#include <linux/spinlock_types.h> +#include <asm/barrier.h> + +#ifdef CONFIG_QUEUED_RWLOCKS + +/* + * Ticket-based spin-locking. 
+ */ +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + arch_spinlock_t lockval; + u32 ticket_next = 1 << TICKET_NEXT; + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%2) \n" + " mov %1, %0 \n" + " add %0, %3 \n" + " stex.w %0, (%2) \n" + " bez %0, 1b \n" + : "=&r" (tmp), "=&r" (lockval) + : "r"(p), "r"(ticket_next) + : "cc"); + + while (lockval.tickets.next != lockval.tickets.owner) + lockval.tickets.owner = READ_ONCE(lock->tickets.owner); + + smp_mb(); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + u32 tmp, contended, res; + u32 ticket_next = 1 << TICKET_NEXT; + u32 *p = &lock->lock; + + do { + asm volatile ( + " ldex.w %0, (%3) \n" + " movi %2, 1 \n" + " rotli %1, %0, 16 \n" + " cmpne %1, %0 \n" + " bt 1f \n" + " movi %2, 0 \n" + " add %0, %0, %4 \n" + " stex.w %0, (%3) \n" + "1: \n" + : "=&r" (res), "=&r" (tmp), "=&r" (contended) + : "r"(p), "r"(ticket_next) + : "cc"); + } while (!res); + + if (!contended) + smp_mb(); + + return !contended; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1); +} + +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.tickets.owner == lock.tickets.next; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + return !arch_spin_value_unlocked(READ_ONCE(*lock)); +} + +static inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ + struct __raw_tickets tickets = READ_ONCE(lock->tickets); + + return (tickets.next - tickets.owner) > 1; +} +#define arch_spin_is_contended arch_spin_is_contended + +#include <asm/qrwlock.h> + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#else /* CONFIG_QUEUED_RWLOCKS */ + +/* + * Test-and-set spin-locking. 
+ */ +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 1b \n" + " movi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->lock, 0); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 2f \n" + " movi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0) + +/* + * read lock/unlock/trylock + */ +static inline void arch_read_lock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " blz %0, 1b \n" + " addi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_read_unlock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + smp_mb(); + asm volatile ( + "1: ldex.w %0, (%1) \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); +} + +static inline int arch_read_trylock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " blz %0, 2f \n" + " addi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +/* + * write lock/unlock/trylock + */ +static inline void arch_write_lock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 1b \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *lock) +{ + smp_mb(); + WRITE_ONCE(lock->lock, 0); +} + +static inline int arch_write_trylock(arch_rwlock_t *lock) +{ + u32 *p = &lock->lock; + u32 tmp; + + asm volatile ( + "1: ldex.w %0, (%1) \n" + " bnez %0, 2f \n" + " subi %0, 1 \n" + " stex.w %0, (%1) \n" + " bez %0, 1b \n" + " movi %0, 0 \n" + "2: \n" + : "=&r" (tmp) + : "r"(p) + : "cc"); + + if (!tmp) + smp_mb(); + + return !tmp; +} + +#endif /* CONFIG_QUEUED_RWLOCKS */ +#endif /* __ASM_CSKY_SPINLOCK_H */ diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h new file mode 100644 index 000000000..88b82438b --- /dev/null +++ b/arch/csky/include/asm/spinlock_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_SPINLOCK_TYPES_H +#define __ASM_CSKY_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +#define TICKET_NEXT 16 + +typedef struct { + union { + u32 lock; + struct __raw_tickets { + /* little endian */ + u16 owner; + u16 next; + } tickets; + }; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } + +#ifdef CONFIG_QUEUED_RWLOCKS +#include <asm-generic/qrwlock_types.h> + +#else /* CONFIG_NR_CPUS > 2 */ + +typedef struct { + u32 lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif /* CONFIG_QUEUED_RWLOCKS */ +#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */ diff --git a/arch/csky/include/asm/stackprotector.h 
b/arch/csky/include/asm/stackprotector.h
new file mode 100644
index 000000000..d7cd4e51e
--- /dev/null
+++ b/arch/csky/include/asm/stackprotector.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi-random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h
new file mode 100644
index 000000000..73142de18
--- /dev/null
+++ b/arch/csky/include/asm/string.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _CSKY_STRING_MM_H_
+#define _CSKY_STRING_MM_H_
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <abi/string.h>
+#endif
+
+#endif /* _CSKY_STRING_MM_H_ */
diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h
new file mode 100644
index 000000000..35a39e889
--- /dev/null
+++ b/arch/csky/include/asm/switch_to.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SWITCH_TO_H
+#define __ASM_CSKY_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ save_to_user_fp(&prev->thread.user_fp);
+ restore_from_user_fp(&next->thread.user_fp);
+}
+#else
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{}
+#endif
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ __switch_to_fpu(__prev, __next); \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_CSKY_SWITCH_TO_H */
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
new file mode 100644
index 000000000..f624fa3bb
--- /dev/null
+++ b/arch/csky/include/asm/syscall.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SYSCALL_H
+#define __ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <abi/regdef.h>
+#include <uapi/linux/audit.h>
+
+extern void *sys_call_table[];
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs_syscallid(regs);
+}
+
+static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
+ int sysno)
+{
+ regs_syscallid(regs) = sysno;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ?
error : 0; +} + +static inline long +syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void +syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val) +{ + regs->a0 = (long) error ?: val; +} + +static inline void +syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_a0; + args++; + memcpy(args, ®s->a1, 5 * sizeof(args[0])); +} + +static inline void +syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + const unsigned long *args) +{ + regs->orig_a0 = args[0]; + args++; + memcpy(®s->a1, args, 5 * sizeof(regs->a1)); +} + +static inline int +syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_CSKY; +} + +#endif /* __ASM_SYSCALL_H */ diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h new file mode 100644 index 000000000..5d48e5e00 --- /dev/null +++ b/arch/csky/include/asm/syscalls.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SYSCALLS_H +#define __ASM_CSKY_SYSCALLS_H + +#include <asm-generic/syscalls.h> + +long sys_cacheflush(void __user *, unsigned long, int); + +long sys_set_thread_area(unsigned long addr); + +long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len); + +#endif /* __ASM_CSKY_SYSCALLS_H */ diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h new file mode 100644 index 000000000..bd1e662ec --- /dev/null +++ b/arch/csky/include/asm/tcm.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TCM_H +#define __ASM_CSKY_TCM_H + +#ifndef CONFIG_HAVE_TCM +#error "You should not be including tcm.h unless you have a TCM!" +#endif + +#include <linux/compiler.h> + +/* Tag variables with this */ +#define __tcmdata __section(".tcm.data") +/* Tag constants with this */ +#define __tcmconst __section(".tcm.rodata") +/* Tag functions inside TCM called from outside TCM with this */ +#define __tcmfunc __section(".tcm.text") noinline +/* Tag function inside TCM called from inside TCM with this */ +#define __tcmlocalfunc __section(".tcm.text") + +void *tcm_alloc(size_t len); +void tcm_free(void *addr, size_t len); + +#endif diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h new file mode 100644 index 000000000..21456a373 --- /dev/null +++ b/arch/csky/include/asm/thread_info.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
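tcm.h above only provides the section tags and the allocator prototypes; actual placement into ITCM/DTCM is the linker script's job. A hedged sketch of how a driver might use them (the function and variable names are invented):

#include <asm/tcm.h>

/* hot data lives in the .tcm.data section */
static int __tcmdata hot_counter;

/* called from code outside TCM, so it takes the noinline __tcmfunc form */
int __tcmfunc fast_path_tick(void)
{
	return ++hot_counter;
}

static void *scratch;

static int example_setup(void)          /* hypothetical init hook */
{
	scratch = tcm_alloc(256);       /* runtime allocation from the TCM pool */
	return scratch ? 0 : -1;
}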
+ +#ifndef _ASM_CSKY_THREAD_INFO_H +#define _ASM_CSKY_THREAD_INFO_H + +#ifndef __ASSEMBLY__ + +#include <linux/version.h> +#include <asm/types.h> +#include <asm/page.h> +#include <asm/processor.h> +#include <abi/switch_context.h> + +struct thread_info { + struct task_struct *task; + void *dump_exec_domain; + unsigned long flags; + int preempt_count; + unsigned long tp_value; + mm_segment_t addr_limit; + struct restart_block restart_block; + struct pt_regs *regs; + unsigned int cpu; +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ + .cpu = 0, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) + +#define thread_saved_fp(tsk) \ + ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8)) + +#define thread_saved_sp(tsk) \ + ((unsigned long)(tsk->thread.sp)) + +#define thread_saved_lr(tsk) \ + ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15)) + +static inline struct thread_info *current_thread_info(void) +{ + unsigned long sp; + + asm volatile("mov %0, sp\n":"=r"(sp)); + + return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); +} + +#endif /* !__ASSEMBLY__ */ + +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_UPROBE 3 /* uprobe breakpoint or singlestep */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */ +#define TIF_SYSCALL_AUDIT 6 /* syscall auditing */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ +#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ +#define TIF_SECCOMP 21 /* secure computing */ + +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) + +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + +#endif /* _ASM_CSKY_THREAD_INFO_H */ diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h new file mode 100644 index 000000000..fdff9b8d7 --- /dev/null +++ b/arch/csky/include/asm/tlb.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
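current_thread_info() above works because kernel stacks are THREAD_SIZE-aligned, so masking the live stack pointer always lands on the thread_info at the stack base. The same trick as a tiny standalone sketch, not part of this diff (THREAD_SIZE mirrors the two-page value from page.h):

#include <stdio.h>

#define THREAD_SIZE (2 * 4096UL)

int main(void)
{
	unsigned long sp = 0x8070be74UL;            /* some address inside a kernel stack */
	unsigned long ti = sp & ~(THREAD_SIZE - 1); /* round down to the 8 KiB base */

	printf("sp=%#lx -> thread_info at %#lx\n", sp, ti);
	return 0;
}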
+ +#ifndef __ASM_CSKY_TLB_H +#define __ASM_CSKY_TLB_H + +#include <asm/cacheflush.h> + +#define tlb_start_vma(tlb, vma) \ + do { \ + if (!(tlb)->fullmm) \ + flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \ + } while (0) + +#define tlb_end_vma(tlb, vma) \ + do { \ + if (!(tlb)->fullmm) \ + flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \ + } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include <asm-generic/tlb.h> + +#endif /* __ASM_CSKY_TLB_H */ diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h new file mode 100644 index 000000000..6845b0667 --- /dev/null +++ b/arch/csky/include/asm/tlbflush.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +extern void flush_tlb_one(unsigned long vaddr); + +#endif diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h new file mode 100644 index 000000000..1c081805b --- /dev/null +++ b/arch/csky/include/asm/traps.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_TRAPS_H +#define __ASM_CSKY_TRAPS_H + +#define VEC_RESET 0 +#define VEC_ALIGN 1 +#define VEC_ACCESS 2 +#define VEC_ZERODIV 3 +#define VEC_ILLEGAL 4 +#define VEC_PRIV 5 +#define VEC_TRACE 6 +#define VEC_BREAKPOINT 7 +#define VEC_UNRECOVER 8 +#define VEC_SOFTRESET 9 +#define VEC_AUTOVEC 10 +#define VEC_FAUTOVEC 11 +#define VEC_HWACCEL 12 + +#define VEC_TLBMISS 14 +#define VEC_TLBMODIFIED 15 + +#define VEC_TRAP0 16 +#define VEC_TRAP1 17 +#define VEC_TRAP2 18 +#define VEC_TRAP3 19 + +#define VEC_TLBINVALIDL 20 +#define VEC_TLBINVALIDS 21 + +#define VEC_PRFL 29 +#define VEC_FPE 30 + +extern void *vec_base[]; + +#define VEC_INIT(i, func) \ +do { \ + vec_base[i] = (void *)func; \ +} while (0) + +void csky_alignment(struct pt_regs *regs); + +#endif /* __ASM_CSKY_TRAPS_H */ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h new file mode 100644 index 000000000..1633ffe5a --- /dev/null +++ b/arch/csky/include/asm/uaccess.h @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
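The tlbflush.h entry points above are what generic mm code calls once page tables change; nothing here flushes automatically. A hedged kernel-side fragment showing the usual pairing (the function name and context are invented):

/* illustrative fragment, not part of this diff */
static void example_change_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* ... set_pte_at() updates covering [start, end) ... */

	/* stale entries must leave the TLB before userspace runs again */
	flush_tlb_range(vma, start, end);
}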
+ +#ifndef __ASM_CSKY_UACCESS_H +#define __ASM_CSKY_UACCESS_H + +/* + * User space memory access functions + */ +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/version.h> +#include <asm/segment.h> + +static inline int access_ok(const void *addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit.seg; + + return (((unsigned long)addr < limit) && + ((unsigned long)(addr + size) < limit)); +} + +#define __addr_ok(addr) (access_ok(addr, 0)) + +extern int __put_user_bad(void); + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. + */ + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the ugliness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). + * + * As we use the same address space for kernel and user data on + * Ckcore, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + */ + +#define put_user(x, ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) + +#define __put_user(x, ptr) \ + __put_user_nocheck((x), (ptr), sizeof(*(ptr))) + +#define __ptr(x) ((unsigned long *)(x)) + +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + typeof(*(ptr)) *__pu_addr = (ptr); \ + typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ + if (__pu_addr) \ + __put_user_size(__pu_val, (__pu_addr), (size), \ + __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + typeof(*(ptr)) *__pu_addr = (ptr); \ + typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \ + if (access_ok(__pu_addr, size) && __pu_addr) \ + __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __put_user_asm_b(x, ptr, retval); \ + break; \ + case 2: \ + __put_user_asm_h(x, ptr, retval); \ + break; \ + case 4: \ + __put_user_asm_w(x, ptr, retval); \ + break; \ + case 8: \ + __put_user_asm_64(x, ptr, retval); \ + break; \ + default: \ + __put_user_bad(); \ + } \ +} while (0) + +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. + * + * Note that PC at a fault is the address *after* the faulting + * instruction. 
+ */ +#define __put_user_asm_b(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: stb %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_h(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: sth %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b,2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_w(x, ptr, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: stw %1, (%2,0) \n" \ + " br 3f \n" \ + "2: mov %0, %3 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \ + : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __put_user_asm_64(x, ptr, err) \ +do { \ + int tmp; \ + int errcode; \ + typeof(*(ptr))src = (typeof(*(ptr)))x; \ + typeof(*(ptr))*psrc = &src; \ + \ + asm volatile( \ + " ldw %3, (%1, 0) \n" \ + "1: stw %3, (%2, 0) \n" \ + " ldw %3, (%1, 4) \n" \ + "2: stw %3, (%2, 4) \n" \ + " br 4f \n" \ + "3: mov %0, %4 \n" \ + " br 4f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 3b \n" \ + ".long 2b, 3b \n" \ + ".previous \n" \ + "4: \n" \ + : "=r"(err), "=r"(psrc), "=r"(ptr), \ + "=r"(tmp), "=r"(errcode) \ + : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \ + : "memory"); \ +} while (0) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + __get_user_size(x, (ptr), (size), __gu_err); \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + int __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + if (access_ok(__gu_ptr, size) && __gu_ptr) \ + __get_user_size(x, __gu_ptr, size, __gu_err); \ + __gu_err; \ +}) + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + switch (size) { \ + case 1: \ + __get_user_asm_common((x), ptr, "ldb", retval); \ + break; \ + case 2: \ + __get_user_asm_common((x), ptr, "ldh", retval); \ + break; \ + case 4: \ + __get_user_asm_common((x), ptr, "ldw", retval); \ + break; \ + default: \ + x = 0; \ + (retval) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm_common(x, ptr, ins, err) \ +do { \ + int errcode; \ + asm volatile( \ + "1: " ins " %1, (%4,0) \n" \ + " br 3f \n" \ + /* Fix up codes */ \ + "2: mov %0, %2 \n" \ + " movi %1, 0 \n" \ + " br 3f \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 1b, 2b \n" \ + ".previous \n" \ + "3: \n" \ + : "=r"(err), "=r"(x), "=r"(errcode) \ + : "0"(0), "r"(ptr), "2"(-EFAULT) \ + : "memory"); \ +} while (0) + +extern int __get_user_bad(void); + +#define ___copy_to_user(to, from, n) \ +do { \ + int w0, w1, w2, w3; \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 8f \n" \ + " mov %3, %1 \n" \ + " or %3, %2 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 16 \n" /* 4W */ \ + " bt 3f \n" \ + " ldw %3, (%2, 0) \n" \ + " ldw %4, (%2, 4) \n" \ + " ldw %5, (%2, 8) \n" \ + " ldw %6, (%2, 12) \n" \ + "2: stw %3, (%1, 0) \n" \ + "9: stw %4, (%1, 4) \n" \ + "10: stw %5, 
(%1, 8) \n" \ + "11: stw %6, (%1, 12) \n" \ + " addi %2, 16 \n" \ + " addi %1, 16 \n" \ + " subi %0, 16 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" /* 1W */ \ + " bt 5f \n" \ + " ldw %3, (%2, 0) \n" \ + "4: stw %3, (%1, 0) \n" \ + " addi %2, 4 \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" /* 1B */ \ + " bf 13f \n" \ + " ldb %3, (%2, 0) \n" \ + "6: stb %3, (%1, 0) \n" \ + " addi %2, 1 \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + "7: subi %0, 4 \n" \ + "8: subi %0, 4 \n" \ + "12: subi %0, 4 \n" \ + " br 13f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 13f \n" \ + ".long 4b, 13f \n" \ + ".long 6b, 13f \n" \ + ".long 9b, 12b \n" \ + ".long 10b, 8b \n" \ + ".long 11b, 7b \n" \ + ".previous \n" \ + "13: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \ + "=r"(w1), "=r"(w2), "=r"(w3) \ + : "0"(n), "1"(to), "2"(from) \ + : "memory"); \ +} while (0) + +#define ___copy_from_user(to, from, n) \ +do { \ + int tmp; \ + int nsave; \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 7f \n" \ + " mov %3, %1 \n" \ + " or %3, %2 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 16 \n" \ + " bt 3f \n" \ + "2: ldw %3, (%2, 0) \n" \ + "10: ldw %4, (%2, 4) \n" \ + " stw %3, (%1, 0) \n" \ + " stw %4, (%1, 4) \n" \ + "11: ldw %3, (%2, 8) \n" \ + "12: ldw %4, (%2, 12) \n" \ + " stw %3, (%1, 8) \n" \ + " stw %4, (%1, 12) \n" \ + " addi %2, 16 \n" \ + " addi %1, 16 \n" \ + " subi %0, 16 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" \ + " bt 5f \n" \ + "4: ldw %3, (%2, 0) \n" \ + " stw %3, (%1, 0) \n" \ + " addi %2, 4 \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" \ + " bf 7f \n" \ + "6: ldb %3, (%2, 0) \n" \ + " stb %3, (%1, 0) \n" \ + " addi %2, 1 \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + "8: stw %3, (%1, 0) \n" \ + " subi %0, 4 \n" \ + " bf 7f \n" \ + "9: subi %0, 8 \n" \ + " bf 7f \n" \ + "13: stw %3, (%1, 8) \n" \ + " subi %0, 12 \n" \ + " bf 7f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 7f \n" \ + ".long 4b, 7f \n" \ + ".long 6b, 7f \n" \ + ".long 10b, 8b \n" \ + ".long 11b, 9b \n" \ + ".long 12b,13b \n" \ + ".previous \n" \ + "7: \n" \ + : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \ + "=r"(tmp) \ + : "0"(n), "1"(to), "2"(from) \ + : "memory"); \ +} while (0) + +unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n); +unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); + +unsigned long clear_user(void *to, unsigned long n); +unsigned long __clear_user(void __user *to, unsigned long n); + +long strncpy_from_user(char *dst, const char *src, long count); +long __strncpy_from_user(char *dst, const char *src, long count); + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +long strnlen_user(const char *src, long n); + +#define strlen_user(str) strnlen_user(str, 32767) + +struct exception_table_entry { + unsigned long insn; + unsigned long nextinsn; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h new file mode 100644 index 000000000..da7a18295 --- /dev/null +++ b/arch/csky/include/asm/unistd.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
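The unrolled assembler loops above back raw_copy_from_user()/raw_copy_to_user(); normal code reaches them through the generic copy_from_user()/copy_to_user() wrappers and never calls them directly. A hedged sketch of the usual consumer pattern (the driver names are invented):

/* illustrative fragment, not part of this diff */
static ssize_t example_write(struct file *f, const char __user *buf,
			     size_t len, loff_t *off)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		len = sizeof(kbuf);
	/* copy_from_user() returns the number of bytes it could NOT copy */
	if (copy_from_user(kbuf, buf, len))
		return -EFAULT;
	/* ... consume kbuf ... */
	return len;
}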
+ +#include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/csky/include/asm/uprobes.h b/arch/csky/include/asm/uprobes.h new file mode 100644 index 000000000..600388eb9 --- /dev/null +++ b/arch/csky/include/asm/uprobes.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_UPROBES_H +#define __ASM_CSKY_UPROBES_H + +#include <asm/probes.h> + +#define MAX_UINSN_BYTES 4 + +#define UPROBE_SWBP_INSN USR_BKPT +#define UPROBE_SWBP_INSN_SIZE 2 +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe_task { + unsigned long saved_trap_no; +}; + +struct arch_uprobe { + union { + u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; + }; + struct arch_probe_insn api; + unsigned long insn_size; + bool simulate; +}; + +int uprobe_breakpoint_handler(struct pt_regs *regs); +int uprobe_single_step_handler(struct pt_regs *regs); + +#endif /* __ASM_CSKY_UPROBES_H */ diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h new file mode 100644 index 000000000..d963d691f --- /dev/null +++ b/arch/csky/include/asm/vdso.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_VDSO_H +#define __ASM_CSKY_VDSO_H + +#include <abi/vdso.h> + +struct csky_vdso { + unsigned short rt_signal_retcode[4]; +}; + +#endif /* __ASM_CSKY_VDSO_H */ diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h new file mode 100644 index 000000000..43dca6336 --- /dev/null +++ b/arch/csky/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_CSKY_VMALLOC_H +#define _ASM_CSKY_VMALLOC_H + +#endif /* _ASM_CSKY_VMALLOC_H */ diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild new file mode 100644 index 000000000..e78470141 --- /dev/null +++ b/arch/csky/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += ucontext.h diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..d150cd664 --- /dev/null +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_BYTEORDER_H +#define __ASM_CSKY_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* __ASM_CSKY_BYTEORDER_H */ diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h new file mode 100644 index 000000000..ed7fad1ea --- /dev/null +++ b/arch/csky/include/uapi/asm/cachectl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef __ASM_CSKY_CACHECTL_H +#define __ASM_CSKY_CACHECTL_H + +/* + * See "man cacheflush" + */ +#define ICACHE (1<<0) +#define DCACHE (1<<1) +#define BCACHE (ICACHE|DCACHE) + +#endif /* __ASM_CSKY_CACHECTL_H */ diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000..49d4e147a --- /dev/null +++ b/arch/csky/include/uapi/asm/perf_regs.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
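cachectl.h above exports the flag values for the arch-specific cacheflush syscall that this series wires up in uapi/asm/unistd.h further down. A hedged user-space sketch of a JIT-style flush (assumes a C-SKY toolchain whose headers define __NR_cacheflush):

#include <sys/syscall.h>
#include <unistd.h>

#define ICACHE (1 << 0)
#define DCACHE (1 << 1)
#define BCACHE (ICACHE | DCACHE)

/* after writing instructions into buf, push them out of the D-cache and
 * invalidate the I-cache before jumping to them */
static int flush_code(void *buf, unsigned long len)
{
	return syscall(__NR_cacheflush, buf, len, BCACHE);
}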
+ +#ifndef _ASM_CSKY_PERF_REGS_H +#define _ASM_CSKY_PERF_REGS_H + +/* Index of struct pt_regs */ +enum perf_event_csky_regs { + PERF_REG_CSKY_TLS, + PERF_REG_CSKY_LR, + PERF_REG_CSKY_PC, + PERF_REG_CSKY_SR, + PERF_REG_CSKY_SP, + PERF_REG_CSKY_ORIG_A0, + PERF_REG_CSKY_A0, + PERF_REG_CSKY_A1, + PERF_REG_CSKY_A2, + PERF_REG_CSKY_A3, + PERF_REG_CSKY_REGS0, + PERF_REG_CSKY_REGS1, + PERF_REG_CSKY_REGS2, + PERF_REG_CSKY_REGS3, + PERF_REG_CSKY_REGS4, + PERF_REG_CSKY_REGS5, + PERF_REG_CSKY_REGS6, + PERF_REG_CSKY_REGS7, + PERF_REG_CSKY_REGS8, + PERF_REG_CSKY_REGS9, +#if defined(__CSKYABIV2__) + PERF_REG_CSKY_EXREGS0, + PERF_REG_CSKY_EXREGS1, + PERF_REG_CSKY_EXREGS2, + PERF_REG_CSKY_EXREGS3, + PERF_REG_CSKY_EXREGS4, + PERF_REG_CSKY_EXREGS5, + PERF_REG_CSKY_EXREGS6, + PERF_REG_CSKY_EXREGS7, + PERF_REG_CSKY_EXREGS8, + PERF_REG_CSKY_EXREGS9, + PERF_REG_CSKY_EXREGS10, + PERF_REG_CSKY_EXREGS11, + PERF_REG_CSKY_EXREGS12, + PERF_REG_CSKY_EXREGS13, + PERF_REG_CSKY_EXREGS14, + PERF_REG_CSKY_HI, + PERF_REG_CSKY_LO, + PERF_REG_CSKY_DCSR, +#endif + PERF_REG_CSKY_MAX, +}; +#endif /* _ASM_CSKY_PERF_REGS_H */ diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..66b2268e3 --- /dev/null +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef _CSKY_PTRACE_H +#define _CSKY_PTRACE_H + +#ifndef __ASSEMBLY__ + +struct pt_regs { + unsigned long tls; + unsigned long lr; + unsigned long pc; + unsigned long sr; + unsigned long usp; + + /* + * a0, a1, a2, a3: + * abiv1: r2, r3, r4, r5 + * abiv2: r0, r1, r2, r3 + */ + unsigned long orig_a0; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + + /* + * ABIV2: r4 ~ r13 + * ABIV1: r6 ~ r14, r1 + */ + unsigned long regs[10]; + +#if defined(__CSKYABIV2__) + /* r16 ~ r30 */ + unsigned long exregs[15]; + + unsigned long rhi; + unsigned long rlo; + unsigned long dcsr; +#endif +}; + +struct user_fp { + unsigned long vr[96]; + unsigned long fcr; + unsigned long fesr; + unsigned long fid; + unsigned long reserved; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _CSKY_PTRACE_H */ diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..670c020f2 --- /dev/null +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#ifndef __ASM_CSKY_SIGCONTEXT_H +#define __ASM_CSKY_SIGCONTEXT_H + +#include <asm/ptrace.h> + +struct sigcontext { + struct pt_regs sc_pt_regs; + struct user_fp sc_user_fp; +}; + +#endif /* __ASM_CSKY_SIGCONTEXT_H */ diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h new file mode 100644 index 000000000..ba4018929 --- /dev/null +++ b/arch/csky/include/uapi/asm/unistd.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
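
The perf_event_csky_regs enum above indexes struct pt_regs (uapi/asm/ptrace.h, also above) viewed as an array of 32-bit words, so the enum order must track the member order exactly. A compile-time cross-check, as a sketch:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>
	#include <asm/perf_regs.h>
	#include <asm/ptrace.h>

	static inline void check_perf_reg_layout(void)
	{
		/* Each enum value names the pt_regs word at the same offset. */
		BUILD_BUG_ON(PERF_REG_CSKY_PC !=
			     offsetof(struct pt_regs, pc) / sizeof(unsigned long));
		BUILD_BUG_ON(PERF_REG_CSKY_SP !=
			     offsetof(struct pt_regs, usp) / sizeof(unsigned long));
		BUILD_BUG_ON(PERF_REG_CSKY_A0 !=
			     offsetof(struct pt_regs, a0) / sizeof(unsigned long));
	}
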
+ +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS +#include <asm-generic/unistd.h> + +#define __NR_set_thread_area (__NR_arch_specific_syscall + 0) +__SYSCALL(__NR_set_thread_area, sys_set_thread_area) +#define __NR_cacheflush (__NR_arch_specific_syscall + 1) +__SYSCALL(__NR_cacheflush, sys_cacheflush) diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile new file mode 100644 index 000000000..37f37c0e9 --- /dev/null +++ b/arch/csky/kernel/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +extra-y := head.o vmlinux.lds + +obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o +obj-y += power.o syscall.o syscall_table.o setup.o +obj-y += process.o cpu-probe.o ptrace.o stacktrace.o +obj-y += probes/ + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o +obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +endif diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c new file mode 100644 index 000000000..17479860d --- /dev/null +++ b/arch/csky/kernel/asm-offsets.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/kbuild.h> +#include <abi/regdef.h> + +int main(void) +{ + /* offsets into the task struct */ + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + + /* offsets into the thread struct */ + DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp)); + DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); + DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); + DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); + + /* offsets into the thread_info struct */ + DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); + DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); + + /* offsets into the pt_regs */ + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); + DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + + DEFINE(PT_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_A3, offsetof(struct pt_regs, a3)); + DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS6, 
offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_R15, offsetof(struct pt_regs, lr)); +#if defined(__CSKYABIV2__) + DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0])); + DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1])); + DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2])); + DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3])); + DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4])); + DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5])); + DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6])); + DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7])); + DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8])); + DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9])); + DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10])); + DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11])); + DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12])); + DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13])); + DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14])); + DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15])); + DEFINE(PT_RHI, offsetof(struct pt_regs, rhi)); + DEFINE(PT_RLO, offsetof(struct pt_regs, rlo)); +#endif + DEFINE(PT_USP, offsetof(struct pt_regs, usp)); + DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs)); + + /* offsets into the irq_cpustat_t struct */ + DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, + __softirq_pending)); + + /* signal defines */ + DEFINE(SIGSEGV, SIGSEGV); + DEFINE(SIGTRAP, SIGTRAP); + + return 0; +} diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S new file mode 100644 index 000000000..3821ef9b7 --- /dev/null +++ b/arch/csky/kernel/atomic.S @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/linkage.h> +#include <abi/entry.h> + +.text + +/* + * int csky_cmpxchg(int oldval, int newval, int *ptr) + * + * If *ptr != oldval && return 1, + * else *ptr = newval return 0. + */ +ENTRY(csky_cmpxchg) + USPTOKSP + mfcr a3, epc + addi a3, TRAP0_SIZE + + subi sp, 16 + stw a3, (sp, 0) + mfcr a3, epsr + stw a3, (sp, 4) + mfcr a3, usp + stw a3, (sp, 8) + + psrset ee +#ifdef CONFIG_CPU_HAS_LDSTEX +1: + ldex a3, (a2) + cmpne a0, a3 + bt16 2f + mov a3, a1 + stex a3, (a2) + bez a3, 1b +2: + sync.is +#else +1: + ldw a3, (a2) + cmpne a0, a3 + bt16 3f +2: + stw a1, (a2) +3: +#endif + mvc a0 + ldw a3, (sp, 0) + mtcr a3, epc + ldw a3, (sp, 4) + mtcr a3, epsr + ldw a3, (sp, 8) + mtcr a3, usp + addi sp, 16 + KSPTOUSP + rte +END(csky_cmpxchg) + +#ifndef CONFIG_CPU_HAS_LDSTEX +/* + * Called from tlbmodified exception + */ +ENTRY(csky_cmpxchg_fixup) + mfcr a0, epc + lrw a1, 2b + cmpne a1, a0 + bt 1f + subi a1, (2b - 1b) + stw a1, (sp, LSAVE_PC) +1: + rts +END(csky_cmpxchg_fixup) +#endif diff --git a/arch/csky/kernel/cpu-probe.c b/arch/csky/kernel/cpu-probe.c new file mode 100644 index 000000000..5f15ca31d --- /dev/null +++ b/arch/csky/kernel/cpu-probe.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
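
The csky_cmpxchg trap handler in atomic.S above implements the compare-and-swap described in its header comment. Restated as a plain C model (for readers only, not kernel code) — note the return sense is inverted relative to a boolean "success":

	/* Returns 0 on success (swap happened), 1 on value mismatch. */
	static int csky_cmpxchg_model(int oldval, int newval, int *ptr)
	{
		if (*ptr != oldval)
			return 1;	/* the bt16 branch: no store performed */
		*ptr = newval;		/* the stex/stw path */
		return 0;
	}

On CONFIG_CPU_HAS_LDSTEX cores the assembly retries via ldex/stex until the store-exclusive succeeds; on older cores the plain ldw/stw pair is made safe by the csky_cmpxchg_fixup hook invoked from the TLB-modified exception.
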
+ +#include <linux/of.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/memblock.h> + +#include <abi/reg_ops.h> + +static void percpu_print(void *arg) +{ + struct seq_file *m = (struct seq_file *)arg; + unsigned int cur, next, i; + + seq_printf(m, "processor : %d\n", smp_processor_id()); + seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME); + + /* read processor id, max is 100 */ + cur = mfcr("cr13"); + for (i = 0; i < 100; i++) { + seq_printf(m, "product info[%d] : 0x%08x\n", i, cur); + + next = mfcr("cr13"); + + /* some CPU only has one id reg */ + if (cur == next) + break; + + cur = next; + + /* cpid index is 31-28, reset */ + if (!(next >> 28)) { + while ((mfcr("cr13") >> 28) != i); + break; + } + } + + /* CPU feature regs, setup by bootloader or gdbinit */ + seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint()); + seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18")); + seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2()); + seq_printf(m, "\n"); +} + +static int c_show(struct seq_file *m, void *v) +{ + int cpu; + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, percpu_print, m, true); + +#ifdef CSKY_ARCH_VERSION + seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION); + seq_printf(m, "\n"); +#endif + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) {} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show, +}; diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S new file mode 100644 index 000000000..5a5cabd07 --- /dev/null +++ b/arch/csky/kernel/entry.S @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
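
Note that c_start in cpu-probe.c above yields a single opaque token, so c_show runs once and iterates the CPUs itself via smp_call_function_single(); percpu_print() therefore executes in IPI context on each core. A trivial userspace check of the result (each online CPU contributes one block):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/cpuinfo", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
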
+ +#include <linux/linkage.h> +#include <abi/entry.h> +#include <abi/pgtable-bits.h> +#include <asm/errno.h> +#include <asm/setup.h> +#include <asm/unistd.h> +#include <asm/asm-offsets.h> +#include <linux/threads.h> +#include <asm/setup.h> +#include <asm/page.h> +#include <asm/thread_info.h> + +#define PTE_INDX_MSK 0xffc +#define PTE_INDX_SHIFT 10 +#define _PGDIR_SHIFT 22 + +.macro zero_fp +#ifdef CONFIG_STACKTRACE + movi r8, 0 +#endif +.endm + +.macro context_tracking +#ifdef CONFIG_CONTEXT_TRACKING + mfcr a0, epsr + btsti a0, 31 + bt 1f + jbsr context_tracking_user_exit + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV1__) + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) +#endif +1: +#endif +.endm + +.macro tlbop_begin name, val0, val1, val2 +ENTRY(csky_\name) + mtcr a3, ss2 + mtcr r6, ss3 + mtcr a2, ss4 + + RD_PGDR r6 + RD_MEH a3 +#ifdef CONFIG_CPU_HAS_TLBI + tlbi.vaas a3 + sync.is + + btsti a3, 31 + bf 1f + RD_PGDR_K r6 +1: +#else + bgeni a2, 31 + WR_MCIR a2 + bgeni a2, 25 + WR_MCIR a2 +#endif + bclri r6, 0 + lrw a2, va_pa_offset + ld.w a2, (a2, 0) + subu r6, a2 + bseti r6, 31 + + mov a2, a3 + lsri a2, _PGDIR_SHIFT + lsli a2, 2 + addu r6, a2 + ldw r6, (r6) + + lrw a2, va_pa_offset + ld.w a2, (a2, 0) + subu r6, a2 + bseti r6, 31 + + lsri a3, PTE_INDX_SHIFT + lrw a2, PTE_INDX_MSK + and a3, a2 + addu r6, a3 + ldw a3, (r6) + + movi a2, (_PAGE_PRESENT | \val0) + and a3, a2 + cmpne a3, a2 + bt \name + + /* First read/write the page, just update the flags */ + ldw a3, (r6) + bgeni a2, PAGE_VALID_BIT + bseti a2, PAGE_ACCESSED_BIT + bseti a2, \val1 + bseti a2, \val2 + or a3, a2 + stw a3, (r6) + + /* Some cpu tlb-hardrefill bypass the cache */ +#ifdef CONFIG_CPU_NEED_TLBSYNC + movi a2, 0x22 + bseti a2, 6 + mtcr r6, cr22 + mtcr a2, cr17 + sync +#endif + + mfcr a3, ss2 + mfcr r6, ss3 + mfcr a2, ss4 + rte +\name: + mfcr a3, ss2 + mfcr r6, ss3 + mfcr a2, ss4 + SAVE_ALL 0 +.endm +.macro tlbop_end is_write + zero_fp + context_tracking + RD_MEH a2 + psrset ee, ie + mov a0, sp + movi a1, \is_write + jbsr do_page_fault + jmpi ret_from_exception +.endm + +.text + +tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT +tlbop_end 0 + +tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT +tlbop_end 1 + +tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT +#ifndef CONFIG_CPU_HAS_LDSTEX +jbsr csky_cmpxchg_fixup +#endif +tlbop_end 1 + +ENTRY(csky_systemcall) + SAVE_ALL TRAP0_SIZE + zero_fp + context_tracking + psrset ee, ie + + lrw r9, __NR_syscalls + cmphs syscallid, r9 /* Check nr of syscall */ + bt 1f + + lrw r9, sys_call_table + ixw r9, syscallid + ldw syscallid, (r9) + cmpnei syscallid, 0 + bf ret_from_exception + + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bt csky_syscall_trace +#if defined(__CSKYABIV2__) + subi sp, 8 + stw r5, (sp, 0x4) + stw r4, (sp, 0x0) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + jsr syscallid +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ +1: +#ifdef CONFIG_DEBUG_RSEQ + mov a0, sp + jbsr rseq_syscall +#endif + jmpi ret_from_exception + +csky_syscall_trace: + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_enter + cmpnei a0, 0 + bt 1f + /* Prepare args before do system call */ + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV2__) + subi sp, 8 + ldw 
r9, (sp, LSAVE_A4) + stw r9, (sp, 0x0) + ldw r9, (sp, LSAVE_A5) + stw r9, (sp, 0x4) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) + jsr syscallid /* Do system call */ +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ + +1: +#ifdef CONFIG_DEBUG_RSEQ + mov a0, sp + jbsr rseq_syscall +#endif + mov a0, sp /* right now, sp --> pt_regs */ + jbsr syscall_trace_exit + br ret_from_exception + +ENTRY(ret_from_kernel_thread) + jbsr schedule_tail + mov a0, r10 + jsr r9 + jbsr ret_from_exception + +ENTRY(ret_from_fork) + jbsr schedule_tail + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bf ret_from_exception + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_exit + +ret_from_exception: + psrclr ie + ld r9, (sp, LSAVE_PSR) + btsti r9, 31 + + bt 1f + /* + * Load address of current->thread_info, Then get address of task_struct + * Get task_needreshed in task_struct + */ + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_WORK_MASK + and r10, r9 + cmpnei r10, 0 + bt exit_work +#ifdef CONFIG_CONTEXT_TRACKING + jbsr context_tracking_user_enter +#endif +1: +#ifdef CONFIG_PREEMPTION + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, TINFO_PREEMPT) + cmpnei r10, 0 + bt 2f + jbsr preempt_schedule_irq /* irq en/disable is done inside */ +2: +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS + ld r10, (sp, LSAVE_PSR) + btsti r10, 6 + bf 2f + jbsr trace_hardirqs_on +2: +#endif + RESTORE_ALL + +exit_work: + lrw r9, ret_from_exception + mov lr, r9 + + btsti r10, TIF_NEED_RESCHED + bt work_resched + + psrset ie + mov a0, sp + mov a1, r10 + jmpi do_notify_resume + +work_resched: + jmpi schedule + +ENTRY(csky_trap) + SAVE_ALL 0 + zero_fp + context_tracking + psrset ee + mov a0, sp /* Push Stack pointer arg */ + jbsr trap_c /* Call C-level trap handler */ + jmpi ret_from_exception + +/* + * Prototype from libc for abiv1: + * register unsigned int __result asm("a0"); + * asm( "trap 3" :"=r"(__result)::); + */ +ENTRY(csky_get_tls) + USPTOKSP + + /* increase epc for continue */ + mfcr a0, epc + addi a0, TRAP0_SIZE + mtcr a0, epc + + /* get current task thread_info with kernel 8K stack */ + bmaski a0, THREAD_SHIFT + not a0 + subi sp, 1 + and a0, sp + addi sp, 1 + + /* get tls */ + ldw a0, (a0, TINFO_TP_VALUE) + + KSPTOUSP + rte + +ENTRY(csky_irq) + SAVE_ALL 0 + zero_fp + context_tracking + psrset ee + +#ifdef CONFIG_TRACE_IRQFLAGS + jbsr trace_hardirqs_off +#endif + + + mov a0, sp + jbsr csky_do_IRQ + + jmpi ret_from_exception + +/* + * a0 = prev task_struct * + * a1 = next task_struct * + * a0 = return next + */ +ENTRY(__switch_to) + lrw a3, TASK_THREAD + addu a3, a0 + + SAVE_SWITCH_STACK + + stw sp, (a3, THREAD_KSP) + + /* Set up next process to run */ + lrw a3, TASK_THREAD + addu a3, a1 + + ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */ + +#if defined(__CSKYABIV2__) + addi a3, a1, TASK_THREAD_INFO + ldw tls, (a3, TINFO_TP_VALUE) +#endif + + RESTORE_SWITCH_STACK + + rts +ENDPROC(__switch_to) diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c new file mode 100644 index 000000000..b4a7ec151 --- /dev/null +++ b/arch/csky/kernel/ftrace.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
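
The csky_get_tls comment in entry.S above quotes the abiv1 libc calling convention. Expanded into a compilable helper (userspace, abiv1 only; "trap 3" enters the handler above, which returns the thread pointer in a0):

	static inline unsigned int csky_get_tls_user(void)
	{
		register unsigned int __result __asm__("a0");

		/* trap 3 is serviced by ENTRY(csky_get_tls) above. */
		__asm__ volatile ("trap 3" : "=r"(__result));
		return __result;
	}
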
+ +#include <linux/ftrace.h> +#include <linux/uaccess.h> +#include <linux/stop_machine.h> +#include <asm/cacheflush.h> + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define NOP 0x4000 +#define NOP32_HI 0xc400 +#define NOP32_LO 0x4820 +#define PUSH_LR 0x14d0 +#define MOVIH_LINK 0xea3a +#define ORI_LINK 0xef5a +#define JSR_LINK 0xe8fa +#define BSR_LINK 0xe000 + +/* + * Gcc-csky with -pg will insert stub in function prologue: + * push lr + * jbsr _mcount + * nop32 + * nop32 + * + * If the (callee - current_pc) is less then 64MB, we'll use bsr: + * push lr + * bsr _mcount + * nop32 + * nop32 + * else we'll use (movih + ori + jsr): + * push lr + * movih r26, ... + * ori r26, ... + * jsr r26 + * + * (r26 is our reserved link-reg) + * + */ +static inline void make_jbsr(unsigned long callee, unsigned long pc, + uint16_t *call, bool nolr) +{ + long offset; + + call[0] = nolr ? NOP : PUSH_LR; + + offset = (long) callee - (long) pc; + + if (unlikely(offset < -67108864 || offset > 67108864)) { + call[1] = MOVIH_LINK; + call[2] = callee >> 16; + call[3] = ORI_LINK; + call[4] = callee & 0xffff; + call[5] = JSR_LINK; + call[6] = 0; + } else { + offset = offset >> 1; + + call[1] = BSR_LINK | + ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); + call[2] = (uint16_t)((unsigned long) offset & 0xffff); + call[3] = call[5] = NOP32_HI; + call[4] = call[6] = NOP32_LO; + } +} + +static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO, + NOP32_HI, NOP32_LO}; +static int ftrace_check_current_nop(unsigned long hook) +{ + uint16_t olds[7]; + unsigned long hook_pos = hook - 2; + + if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos, + sizeof(nops))) + return -EFAULT; + + if (memcmp((void *)nops, (void *)olds, sizeof(nops))) { + pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n", + (void *)hook_pos, + olds[0], olds[1], olds[2], olds[3], olds[4], olds[5], + olds[6]); + + return -EINVAL; + } + + return 0; +} + +static int ftrace_modify_code(unsigned long hook, unsigned long target, + bool enable, bool nolr) +{ + uint16_t call[7]; + + unsigned long hook_pos = hook - 2; + int ret = 0; + + make_jbsr(target, hook, call, nolr); + + ret = copy_to_kernel_nofault((void *)hook_pos, enable ? 
call : nops, + sizeof(nops)); + if (ret) + return -EPERM; + + flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE); + + return 0; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + int ret = ftrace_check_current_nop(rec->ip); + + if (ret) + return ret; + + return ftrace_modify_code(rec->ip, addr, true, false); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, false, false); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + int ret = ftrace_modify_code((unsigned long)&ftrace_call, + (unsigned long)func, true, true); + if (!ret) + ret = ftrace_modify_code((unsigned long)&ftrace_regs_call, + (unsigned long)func, true, true); + return ret; +} + +int __init ftrace_dyn_arch_init(void) +{ + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, true, true); +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + if (!function_graph_enter(old, self_addr, + *(unsigned long *)frame_pointer, parent)) { + /* + * For csky-gcc function has sub-call: + * subi sp, sp, 8 + * stw r8, (sp, 0) + * mov r8, sp + * st.w r15, (sp, 0x4) + * push r15 + * jl _mcount + * We only need set *parent for resume + * + * For csky-gcc function has no sub-call: + * subi sp, sp, 4 + * stw r8, (sp, 0) + * mov r8, sp + * push r15 + * jl _mcount + * We need set *parent and *(frame_pointer + 4) for resume, + * because lr is resumed twice. 
+ */ + *parent = return_hooker; + frame_pointer += 4; + if (*(unsigned long *)frame_pointer == old) + *(unsigned long *)frame_pointer = return_hooker; + } +} + +#ifdef CONFIG_DYNAMIC_FTRACE +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, true, true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, false, true); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_DYNAMIC_FTRACE +#ifndef CONFIG_CPU_HAS_ICACHE_INS +struct ftrace_modify_param { + int command; + atomic_t cpu_count; +}; + +static int __ftrace_modify_code(void *data) +{ + struct ftrace_modify_param *param = data; + + if (atomic_inc_return(¶m->cpu_count) == 1) { + ftrace_modify_all_code(param->command); + atomic_inc(¶m->cpu_count); + } else { + while (atomic_read(¶m->cpu_count) <= num_online_cpus()) + cpu_relax(); + local_icache_inv_all(NULL); + } + + return 0; +} + +void arch_ftrace_update_code(int command) +{ + struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; + + stop_machine(__ftrace_modify_code, ¶m, cpu_online_mask); +} +#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* _mcount is defined in abi's mcount.S */ +EXPORT_SYMBOL(_mcount); diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S new file mode 100644 index 000000000..17ed9d250 --- /dev/null +++ b/arch/csky/kernel/head.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/page.h> +#include <abi/entry.h> + +__HEAD +ENTRY(_start) + SETUP_MMU + + /* set stack point */ + lrw r6, init_thread_union + THREAD_SIZE + mov sp, r6 + + jmpi csky_start +END(_start) + +#ifdef CONFIG_SMP +.align 10 +ENTRY(_start_smp_secondary) + SETUP_MMU + + /* copy msa1 from CPU0 */ + lrw r6, secondary_msa1 + ld.w r6, (r6, 0) + mtcr r6, cr<31, 15> + + /* set stack point */ + lrw r6, secondary_stack + ld.w r6, (r6, 0) + mov sp, r6 + + jmpi csky_start_secondary +END(_start_smp_secondary) +#endif diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c new file mode 100644 index 000000000..03a1930f1 --- /dev/null +++ b/arch/csky/kernel/irq.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <asm/traps.h> +#include <asm/smp.h> + +void __init init_IRQ(void) +{ + irqchip_init(); +#ifdef CONFIG_SMP + setup_smp_ipi(); +#endif +} + +asmlinkage void __irq_entry csky_do_IRQ(struct pt_regs *regs) +{ + handle_arch_irq(regs); +} diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c new file mode 100644 index 000000000..6cd82d69c --- /dev/null +++ b/arch/csky/kernel/module.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
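
make_jbsr() above chooses between a single bsr and a movih/ori/jsr sequence based on a ±64MB window — the hard-coded 67108864 bounds. A standalone restatement of that range check and of the halfword offset split that feeds the bsr immediate:

	#include <stdbool.h>
	#include <stdint.h>

	static bool bsr_reaches(unsigned long callee, unsigned long pc)
	{
		long offset = (long)callee - (long)pc;

		return offset >= -67108864 && offset <= 67108864; /* +/- 64MB */
	}

	/* The bsr displacement is in halfwords: high 10 bits + low 16 bits. */
	static void split_bsr_offset(long offset, uint16_t *hi10, uint16_t *lo16)
	{
		unsigned long half = (unsigned long)(offset >> 1);

		*hi10 = (uint16_t)(half >> 16) & 0x3ff;
		*lo16 = (uint16_t)(half & 0xffff);
	}
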
+ +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> + +#ifdef CONFIG_CPU_CK810 +#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) +#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) + +#define CHANGE_JSRI_TO_LRW(addr) do { \ + *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \ + *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \ +} while (0) + +#define SET_JSR32_R26(addr) do { \ + *(uint16_t *)(addr) = 0xE8Fa; \ + *((uint16_t *)(addr) + 1) = 0x0000; \ +} while (0) + +static void jsri_2_lrw_jsr(uint32_t *location) +{ + uint16_t *location_tmp = (uint16_t *)location; + + if (IS_BSR32(*location_tmp, *(location_tmp + 1))) + return; + + if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { + /* jsri 0x... --> lrw r26, 0x... */ + CHANGE_JSRI_TO_LRW(location); + /* lsli r0, r0 --> jsr r26 */ + SET_JSR32_R26(location + 1); + } +} +#else +static void inline jsri_2_lrw_jsr(uint32_t *location) +{ + return; +} +#endif + +int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + short *temp; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_CSKY_32: + /* We add the value into the location given */ + *location = rel[i].r_addend + sym->st_value; + break; + case R_CSKY_PC32: + /* Add the value, subtract its postition */ + *location = rel[i].r_addend + sym->st_value + - (uint32_t)location; + break; + case R_CSKY_PCRELJSR_IMM11BY2: + break; + case R_CSKY_PCRELJSR_IMM26BY2: + jsri_2_lrw_jsr(location); + break; + case R_CSKY_ADDR_HI16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) >> 16); + break; + case R_CSKY_ADDR_LO16: + temp = ((short *)location) + 1; + *temp = (short) + ((rel[i].r_addend + sym->st_value) & 0xffff); + break; + default: + pr_err("module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c new file mode 100644 index 000000000..75e1f9df5 --- /dev/null +++ b/arch/csky/kernel/perf_callchain.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
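
To make the R_CSKY_PC32 case in apply_relocate_add() above concrete, a worked example with hypothetical addresses (the values are illustrative only):

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint32_t location = 0x90001000;	/* hypothetical reloc site */
		uint32_t st_value = 0x90003000;	/* hypothetical symbol address */
		int32_t  r_addend = 0;

		/* *location = rel->r_addend + sym->st_value - (uint32_t)location */
		uint32_t patched = r_addend + st_value - location;

		assert(patched == 0x2000);	/* forward PC-relative displacement */
		return 0;
	}
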
+ +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +/* Kernel callchain */ +struct stackframe { + unsigned long fp; + unsigned long lr; +}; + +static int unwind_frame_kernel(struct stackframe *frame) +{ + unsigned long low = (unsigned long)task_stack_page(current); + unsigned long high = low + THREAD_SIZE; + + if (unlikely(frame->fp < low || frame->fp > high)) + return -EPERM; + + if (kstack_end((void *)frame->fp) || frame->fp & 0x3) + return -EPERM; + + *frame = *(struct stackframe *)frame->fp; + + if (__kernel_text_address(frame->lr)) { + int graph = 0; + + frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr, + NULL); + } + return 0; +} + +static void notrace walk_stackframe(struct stackframe *fr, + struct perf_callchain_entry_ctx *entry) +{ + do { + perf_callchain_store(entry, fr->lr); + } while (unwind_frame_kernel(fr) >= 0); +} + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + unsigned long fp, unsigned long reg_lr) +{ + struct stackframe buftail; + unsigned long lr = 0; + unsigned long __user *user_frame_tail = (unsigned long __user *)fp; + + /* Check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + if (__copy_from_user_inatomic(&buftail, user_frame_tail, + sizeof(buftail))) + return 0; + + if (reg_lr != 0) + lr = reg_lr; + else + lr = buftail.lr; + + fp = buftail.fp; + perf_callchain_store(entry, lr); + + return fp; +} + +/* + * This will be called when the target is in user mode + * This function will only be called when we use + * "PERF_SAMPLE_CALLCHAIN" in + * kernel/events/core.c:perf_prepare_sample() + * + * How to trigger perf_callchain_[user/kernel] : + * $ perf record -e cpu-clock --call-graph fp ./program + * $ perf report --call-graph + * + * On C-SKY platform, the program being sampled and the C library + * need to be compiled with * -mbacktrace, otherwise the user + * stack will not contain function frame. + */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + unsigned long fp = 0; + + /* C-SKY does not support virtualization. */ + if (guest_cbs && guest_cbs->is_in_guest()) + return; + + fp = regs->regs[4]; + perf_callchain_store(entry, regs->pc); + + /* + * While backtrace from leaf function, lr is normally + * not saved inside frame on C-SKY, so get lr from pt_regs + * at the sample point. However, lr value can be incorrect if + * lr is used as temp register + */ + fp = user_backtrace(entry, fp, regs->lr); + + while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + fp = user_backtrace(entry, fp, 0); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stackframe fr; + + /* C-SKY does not support virtualization. */ + if (guest_cbs && guest_cbs->is_in_guest()) { + pr_warn("C-SKY does not support perf in guest mode!"); + return; + } + + fr.fp = regs->regs[4]; + fr.lr = regs->lr; + walk_stackframe(&fr, entry); +} diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c new file mode 100644 index 000000000..1a29f1157 --- /dev/null +++ b/arch/csky/kernel/perf_event.c @@ -0,0 +1,1371 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
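
user_backtrace() in perf_callchain.c above relies on the -mbacktrace frame layout: each frame stores a {fp, lr} pair at the address held in fp, and the first lr is taken from pt_regs because leaf functions may not have spilled it. A userspace-style model of the same walk (no access_ok()/__copy_from_user_inatomic(), so illustration only):

	struct frame_tail {
		unsigned long fp;
		unsigned long lr;
	};

	static int walk_frames_model(unsigned long fp, unsigned long *out, int max)
	{
		int n = 0;

		while (fp && !(fp & 0x3) && n < max) {
			struct frame_tail *tail = (struct frame_tail *)fp;

			out[n++] = tail->lr;	/* return address of this frame */
			fp = tail->fp;		/* caller's frame record */
		}
		return n;
	}
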
+ +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#define CSKY_PMU_MAX_EVENTS 32 +#define DEFAULT_COUNT_WIDTH 48 + +#define HPCR "<0, 0x0>" /* PMU Control reg */ +#define HPSPR "<0, 0x1>" /* Start PC reg */ +#define HPEPR "<0, 0x2>" /* End PC reg */ +#define HPSIR "<0, 0x3>" /* Soft Counter reg */ +#define HPCNTENR "<0, 0x4>" /* Count Enable reg */ +#define HPINTENR "<0, 0x5>" /* Interrupt Enable reg */ +#define HPOFSR "<0, 0x6>" /* Interrupt Status reg */ + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[CSKY_PMU_MAX_EVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)]; +}; + +static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void); +static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); + +static struct csky_pmu_t { + struct pmu pmu; + struct pmu_hw_events __percpu *hw_events; + struct platform_device *plat_device; + uint32_t count_width; + uint32_t hpcr; + u64 max_period; +} csky_pmu; +static int csky_pmu_irq; + +#define to_csky_pmu(p) (container_of(p, struct csky_pmu, pmu)) + +#define cprgr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprgr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwgr(reg, val) \ +({ \ + asm volatile( \ + "cpwgr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile( \ + "cpwcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +/* cycle counter */ +static uint64_t csky_pmu_read_cc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x3>"); + lo = cprgr("<0, 0x2>"); + hi = cprgr("<0, 0x3>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cc(uint64_t val) +{ + cpwgr("<0, 0x2>", (uint32_t) val); + cpwgr("<0, 0x3>", (uint32_t) (val >> 32)); +} + +/* instruction counter */ +static uint64_t csky_pmu_read_ic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x5>"); + lo = cprgr("<0, 0x4>"); + hi = cprgr("<0, 0x5>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ic(uint64_t val) +{ + cpwgr("<0, 0x4>", (uint32_t) val); + cpwgr("<0, 0x5>", (uint32_t) (val >> 32)); +} + +/* l1 icache access counter */ +static uint64_t csky_pmu_read_icac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x7>"); + lo = cprgr("<0, 0x6>"); + hi = cprgr("<0, 0x7>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icac(uint64_t val) +{ + cpwgr("<0, 0x6>", (uint32_t) val); + cpwgr("<0, 0x7>", (uint32_t) (val >> 32)); +} + +/* l1 icache miss counter */ +static uint64_t csky_pmu_read_icmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x9>"); + lo = cprgr("<0, 0x8>"); + hi = cprgr("<0, 0x9>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return 
result; +} + +static void csky_pmu_write_icmc(uint64_t val) +{ + cpwgr("<0, 0x8>", (uint32_t) val); + cpwgr("<0, 0x9>", (uint32_t) (val >> 32)); +} + +/* l1 dcache access counter */ +static uint64_t csky_pmu_read_dcac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xb>"); + lo = cprgr("<0, 0xa>"); + hi = cprgr("<0, 0xb>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcac(uint64_t val) +{ + cpwgr("<0, 0xa>", (uint32_t) val); + cpwgr("<0, 0xb>", (uint32_t) (val >> 32)); +} + +/* l1 dcache miss counter */ +static uint64_t csky_pmu_read_dcmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xd>"); + lo = cprgr("<0, 0xc>"); + hi = cprgr("<0, 0xd>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcmc(uint64_t val) +{ + cpwgr("<0, 0xc>", (uint32_t) val); + cpwgr("<0, 0xd>", (uint32_t) (val >> 32)); +} + +/* l2 cache access counter */ +static uint64_t csky_pmu_read_l2ac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xf>"); + lo = cprgr("<0, 0xe>"); + hi = cprgr("<0, 0xf>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2ac(uint64_t val) +{ + cpwgr("<0, 0xe>", (uint32_t) val); + cpwgr("<0, 0xf>", (uint32_t) (val >> 32)); +} + +/* l2 cache miss counter */ +static uint64_t csky_pmu_read_l2mc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x11>"); + lo = cprgr("<0, 0x10>"); + hi = cprgr("<0, 0x11>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2mc(uint64_t val) +{ + cpwgr("<0, 0x10>", (uint32_t) val); + cpwgr("<0, 0x11>", (uint32_t) (val >> 32)); +} + +/* I-UTLB miss counter */ +static uint64_t csky_pmu_read_iutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x15>"); + lo = cprgr("<0, 0x14>"); + hi = cprgr("<0, 0x15>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_iutlbmc(uint64_t val) +{ + cpwgr("<0, 0x14>", (uint32_t) val); + cpwgr("<0, 0x15>", (uint32_t) (val >> 32)); +} + +/* D-UTLB miss counter */ +static uint64_t csky_pmu_read_dutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x17>"); + lo = cprgr("<0, 0x16>"); + hi = cprgr("<0, 0x17>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dutlbmc(uint64_t val) +{ + cpwgr("<0, 0x16>", (uint32_t) val); + cpwgr("<0, 0x17>", (uint32_t) (val >> 32)); +} + +/* JTLB miss counter */ +static uint64_t csky_pmu_read_jtlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x19>"); + lo = cprgr("<0, 0x18>"); + hi = cprgr("<0, 0x19>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_jtlbmc(uint64_t val) +{ + cpwgr("<0, 0x18>", (uint32_t) val); + cpwgr("<0, 0x19>", (uint32_t) (val >> 32)); +} + +/* software counter */ +static uint64_t csky_pmu_read_softc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1b>"); + lo = cprgr("<0, 0x1a>"); + hi = cprgr("<0, 0x1b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + 
return result; +} + +static void csky_pmu_write_softc(uint64_t val) +{ + cpwgr("<0, 0x1a>", (uint32_t) val); + cpwgr("<0, 0x1b>", (uint32_t) (val >> 32)); +} + +/* conditional branch mispredict counter */ +static uint64_t csky_pmu_read_cbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1d>"); + lo = cprgr("<0, 0x1c>"); + hi = cprgr("<0, 0x1d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbmc(uint64_t val) +{ + cpwgr("<0, 0x1c>", (uint32_t) val); + cpwgr("<0, 0x1d>", (uint32_t) (val >> 32)); +} + +/* conditional branch instruction counter */ +static uint64_t csky_pmu_read_cbic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1f>"); + lo = cprgr("<0, 0x1e>"); + hi = cprgr("<0, 0x1f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbic(uint64_t val) +{ + cpwgr("<0, 0x1e>", (uint32_t) val); + cpwgr("<0, 0x1f>", (uint32_t) (val >> 32)); +} + +/* indirect branch mispredict counter */ +static uint64_t csky_pmu_read_ibmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x21>"); + lo = cprgr("<0, 0x20>"); + hi = cprgr("<0, 0x21>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibmc(uint64_t val) +{ + cpwgr("<0, 0x20>", (uint32_t) val); + cpwgr("<0, 0x21>", (uint32_t) (val >> 32)); +} + +/* indirect branch instruction counter */ +static uint64_t csky_pmu_read_ibic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x23>"); + lo = cprgr("<0, 0x22>"); + hi = cprgr("<0, 0x23>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibic(uint64_t val) +{ + cpwgr("<0, 0x22>", (uint32_t) val); + cpwgr("<0, 0x23>", (uint32_t) (val >> 32)); +} + +/* LSU spec fail counter */ +static uint64_t csky_pmu_read_lsfc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x25>"); + lo = cprgr("<0, 0x24>"); + hi = cprgr("<0, 0x25>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_lsfc(uint64_t val) +{ + cpwgr("<0, 0x24>", (uint32_t) val); + cpwgr("<0, 0x25>", (uint32_t) (val >> 32)); +} + +/* store instruction counter */ +static uint64_t csky_pmu_read_sic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x27>"); + lo = cprgr("<0, 0x26>"); + hi = cprgr("<0, 0x27>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_sic(uint64_t val) +{ + cpwgr("<0, 0x26>", (uint32_t) val); + cpwgr("<0, 0x27>", (uint32_t) (val >> 32)); +} + +/* dcache read access counter */ +static uint64_t csky_pmu_read_dcrac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x29>"); + lo = cprgr("<0, 0x28>"); + hi = cprgr("<0, 0x29>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrac(uint64_t val) +{ + cpwgr("<0, 0x28>", (uint32_t) val); + cpwgr("<0, 0x29>", (uint32_t) (val >> 32)); +} + +/* dcache read miss counter */ +static uint64_t csky_pmu_read_dcrmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2b>"); + lo = cprgr("<0, 0x2a>"); + hi = cprgr("<0, 
0x2b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrmc(uint64_t val) +{ + cpwgr("<0, 0x2a>", (uint32_t) val); + cpwgr("<0, 0x2b>", (uint32_t) (val >> 32)); +} + +/* dcache write access counter */ +static uint64_t csky_pmu_read_dcwac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2d>"); + lo = cprgr("<0, 0x2c>"); + hi = cprgr("<0, 0x2d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwac(uint64_t val) +{ + cpwgr("<0, 0x2c>", (uint32_t) val); + cpwgr("<0, 0x2d>", (uint32_t) (val >> 32)); +} + +/* dcache write miss counter */ +static uint64_t csky_pmu_read_dcwmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2f>"); + lo = cprgr("<0, 0x2e>"); + hi = cprgr("<0, 0x2f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwmc(uint64_t val) +{ + cpwgr("<0, 0x2e>", (uint32_t) val); + cpwgr("<0, 0x2f>", (uint32_t) (val >> 32)); +} + +/* l2cache read access counter */ +static uint64_t csky_pmu_read_l2rac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x31>"); + lo = cprgr("<0, 0x30>"); + hi = cprgr("<0, 0x31>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rac(uint64_t val) +{ + cpwgr("<0, 0x30>", (uint32_t) val); + cpwgr("<0, 0x31>", (uint32_t) (val >> 32)); +} + +/* l2cache read miss counter */ +static uint64_t csky_pmu_read_l2rmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x33>"); + lo = cprgr("<0, 0x32>"); + hi = cprgr("<0, 0x33>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rmc(uint64_t val) +{ + cpwgr("<0, 0x32>", (uint32_t) val); + cpwgr("<0, 0x33>", (uint32_t) (val >> 32)); +} + +/* l2cache write access counter */ +static uint64_t csky_pmu_read_l2wac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x35>"); + lo = cprgr("<0, 0x34>"); + hi = cprgr("<0, 0x35>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wac(uint64_t val) +{ + cpwgr("<0, 0x34>", (uint32_t) val); + cpwgr("<0, 0x35>", (uint32_t) (val >> 32)); +} + +/* l2cache write miss counter */ +static uint64_t csky_pmu_read_l2wmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x37>"); + lo = cprgr("<0, 0x36>"); + hi = cprgr("<0, 0x37>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wmc(uint64_t val) +{ + cpwgr("<0, 0x36>", (uint32_t) val); + cpwgr("<0, 0x37>", (uint32_t) (val >> 32)); +} + +#define HW_OP_UNSUPPORTED 0xffff +static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x1, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x2, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf, + [PERF_COUNT_HW_BRANCH_MISSES] = 0xe, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED, 
+}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xffff +static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x5, + [C(RESULT_MISS)] = 0x6, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0x15, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0x17, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0x4, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x7, + [C(RESULT_MISS)] = 0x8, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x18, + [C(RESULT_MISS)] = 0x19, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x1a, + [C(RESULT_MISS)] = 0x1b, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(DTLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0xb, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0xb, + }, +#endif + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0xa, + }, +#endif + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +int 
csky_pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)csky_pmu.max_period) + left = csky_pmu.max_period; + + /* + * The hw event starts counting from this event offset, + * mark it to be able to extract future "deltas": + */ + local64_set(&hwc->prev_count, (u64)(-left)); + + if (hw_raw_write_mapping[hwc->idx] != NULL) + hw_raw_write_mapping[hwc->idx]((u64)(-left) & + csky_pmu.max_period); + + cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR)); + + perf_event_update_userpage(event); + + return ret; +} + +static void csky_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc) +{ + uint64_t prev_raw_count = local64_read(&hwc->prev_count); + /* + * Sign extend count value to 64bit, otherwise delta calculation + * would be incorrect when overflow occurs. + */ + uint64_t new_raw_count = sign_extend64( + hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1); + int64_t delta = new_raw_count - prev_raw_count; + + /* + * We aren't afraid of hwc->prev_count changing beneath our feet + * because there's no way for us to re-enter this function anytime. + */ + local64_set(&hwc->prev_count, new_raw_count); + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void csky_pmu_reset(void *info) +{ + cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1)); +} + +static void csky_pmu_read(struct perf_event *event) +{ + csky_perf_event_update(event, &event->hw); +} + +static int csky_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + return csky_pmu_cache_map[cache_type][cache_op][cache_result]; +} + +static int csky_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int ret; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -ENOENT; + ret = csky_pmu_hw_map[event->attr.config]; + if (ret == HW_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + break; + case PERF_TYPE_HW_CACHE: + ret = csky_pmu_cache_event(event->attr.config); + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + break; + case PERF_TYPE_RAW: + if (hw_raw_read_mapping[event->attr.config] == NULL) + return -ENOENT; + hwc->idx = event->attr.config; + break; + default: + return -ENOENT; + } + + if (event->attr.exclude_user) + csky_pmu.hpcr = BIT(2); + else if (event->attr.exclude_kernel) + csky_pmu.hpcr = BIT(3); + else + csky_pmu.hpcr = BIT(2) | BIT(3); + + csky_pmu.hpcr |= BIT(1) | BIT(0); + + return 0; +} + +/* starts all counters */ +static void csky_pmu_enable(struct pmu *pmu) +{ + cpwcr(HPCR, csky_pmu.hpcr); +} + +/* stops all counters */ +static void csky_pmu_disable(struct pmu *pmu) +{ + cpwcr(HPCR, BIT(1)); +} + +static void csky_pmu_start(struct perf_event *event, int flags) +{ + unsigned long flg; + struct 
hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + csky_pmu_event_set_period(event); + + local_irq_save(flg); + + cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR)); + cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR)); + + local_irq_restore(flg); +} + +static void csky_pmu_stop_event(struct perf_event *event) +{ + unsigned long flg; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + local_irq_save(flg); + + cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR)); + cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR)); + + local_irq_restore(flg); +} + +static void csky_pmu_stop(struct perf_event *event, int flags) +{ + if (!(event->hw.state & PERF_HES_STOPPED)) { + csky_pmu_stop_event(event); + event->hw.state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + csky_perf_event_update(event, &event->hw); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void csky_pmu_del(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + csky_pmu_stop(event, PERF_EF_UPDATE); + + hw_events->events[hwc->idx] = NULL; + + perf_event_update_userpage(event); +} + +/* allocate hardware counter and optionally start counting */ +static int csky_pmu_add(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + hw_events->events[hwc->idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + csky_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev) +{ + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events); + struct pt_regs *regs; + int idx; + + /* + * Did an overflow occur? + */ + if (!cprcr(HPOFSR)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + csky_pmu_disable(&csky_pmu.pmu); + + for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!(cprcr(HPOFSR) & BIT(idx))) + continue; + + hwc = &event->hw; + csky_perf_event_update(event, &event->hw); + perf_sample_data_init(&data, 0, hwc->last_period); + csky_pmu_event_set_period(event); + + if (perf_event_overflow(event, &data, regs)) + csky_pmu_stop_event(event); + } + + csky_pmu_enable(&csky_pmu.pmu); + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. 
+ */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static int csky_pmu_request_irq(irq_handler_t handler) +{ + int err, irqs; + struct platform_device *pmu_device = csky_pmu.plat_device; + + if (!pmu_device) + return -ENODEV; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (irqs < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + csky_pmu_irq = platform_get_irq(pmu_device, 0); + if (csky_pmu_irq < 0) + return -ENODEV; + err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu", + this_cpu_ptr(csky_pmu.hw_events)); + if (err) { + pr_err("unable to request IRQ%d for CSKY PMU counters\n", + csky_pmu_irq); + return err; + } + + return 0; +} + +static void csky_pmu_free_irq(void) +{ + int irq; + struct platform_device *pmu_device = csky_pmu.plat_device; + + irq = platform_get_irq(pmu_device, 0); + if (irq >= 0) + free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events)); +} + +int init_hw_perf_events(void) +{ + csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events, + GFP_KERNEL); + if (!csky_pmu.hw_events) { + pr_info("failed to allocate per-cpu PMU data.\n"); + return -ENOMEM; + } + + csky_pmu.pmu = (struct pmu) { + .pmu_enable = csky_pmu_enable, + .pmu_disable = csky_pmu_disable, + .event_init = csky_pmu_event_init, + .add = csky_pmu_add, + .del = csky_pmu_del, + .start = csky_pmu_start, + .stop = csky_pmu_stop, + .read = csky_pmu_read, + }; + + memset((void *)hw_raw_read_mapping, 0, + sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_read_mapping[0x1] = csky_pmu_read_cc; + hw_raw_read_mapping[0x2] = csky_pmu_read_ic; + hw_raw_read_mapping[0x3] = csky_pmu_read_icac; + hw_raw_read_mapping[0x4] = csky_pmu_read_icmc; + hw_raw_read_mapping[0x5] = csky_pmu_read_dcac; + hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc; + hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac; + hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc; + hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc; + hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc; + hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc; + hw_raw_read_mapping[0xd] = csky_pmu_read_softc; + hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc; + hw_raw_read_mapping[0xf] = csky_pmu_read_cbic; + hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc; + hw_raw_read_mapping[0x11] = csky_pmu_read_ibic; + hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc; + hw_raw_read_mapping[0x13] = csky_pmu_read_sic; + hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac; + hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc; + hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac; + hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc; + hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac; + hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc; + hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac; + hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc; + + memset((void *)hw_raw_write_mapping, 0, + sizeof(hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_write_mapping[0x1] = csky_pmu_write_cc; + hw_raw_write_mapping[0x2] = csky_pmu_write_ic; + hw_raw_write_mapping[0x3] = csky_pmu_write_icac; + hw_raw_write_mapping[0x4] = csky_pmu_write_icmc; + hw_raw_write_mapping[0x5] = csky_pmu_write_dcac; + hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc; + hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac; + hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc; + hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc; + hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc; + hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc; + hw_raw_write_mapping[0xd] = csky_pmu_write_softc; + hw_raw_write_mapping[0xe] = 
csky_pmu_write_cbmc; + hw_raw_write_mapping[0xf] = csky_pmu_write_cbic; + hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc; + hw_raw_write_mapping[0x11] = csky_pmu_write_ibic; + hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc; + hw_raw_write_mapping[0x13] = csky_pmu_write_sic; + hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac; + hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc; + hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac; + hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc; + hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac; + hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc; + hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac; + hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc; + + return 0; +} + +static int csky_pmu_starting_cpu(unsigned int cpu) +{ + enable_percpu_irq(csky_pmu_irq, 0); + return 0; +} + +static int csky_pmu_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(csky_pmu_irq); + return 0; +} + +int csky_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table) +{ + struct device_node *node = pdev->dev.of_node; + int ret; + + ret = init_hw_perf_events(); + if (ret) { + pr_notice("[perf] failed to probe PMU!\n"); + return ret; + } + + if (of_property_read_u32(node, "count-width", + &csky_pmu.count_width)) { + csky_pmu.count_width = DEFAULT_COUNT_WIDTH; + } + csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1; + + csky_pmu.plat_device = pdev; + + /* Ensure the PMU has sane values out of reset. */ + on_each_cpu(csky_pmu_reset, &csky_pmu, 1); + + ret = csky_pmu_request_irq(csky_pmu_handle_irq); + if (ret) { + csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + pr_notice("[perf] PMU request irq fail!\n"); + } + + ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE", + csky_pmu_starting_cpu, + csky_pmu_dying_cpu); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + return ret; + } + + ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + } + + return ret; +} + +static const struct of_device_id csky_pmu_of_device_ids[] = { + {.compatible = "csky,csky-pmu"}, + {}, +}; + +static int csky_pmu_dev_probe(struct platform_device *pdev) +{ + return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids); +} + +static struct platform_driver csky_pmu_driver = { + .driver = { + .name = "csky-pmu", + .of_match_table = csky_pmu_of_device_ids, + }, + .probe = csky_pmu_dev_probe, +}; + +static int __init csky_pmu_probe(void) +{ + int ret; + + ret = platform_driver_register(&csky_pmu_driver); + if (ret) + pr_notice("[perf] PMU initialization failed\n"); + else + pr_notice("[perf] PMU initialization done\n"); + + return ret; +} + +device_initcall(csky_pmu_probe); diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c new file mode 100644 index 000000000..09b7f88a2 --- /dev/null +++ b/arch/csky/kernel/perf_regs.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
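+//
+// Illustrative note, not part of the original commit: user space opts in
+// to these registers through the perf sampling ABI, roughly:
+//
+//	attr.sample_type |= PERF_SAMPLE_REGS_USER;
+//	attr.sample_regs_user = (1ULL << PERF_REG_CSKY_MAX) - 1;
+//
+// perf_reg_validate() below rejects any mask with bits set at or above
+// PERF_REG_CSKY_MAX.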
+ +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/bug.h> +#include <asm/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX)) + return 0; + + return (u64)*((u32 *)regs + idx); +} + +#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_32; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c new file mode 100644 index 000000000..923ee4e38 --- /dev/null +++ b/arch/csky/kernel/power.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/reboot.h> + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_power_off(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_halt(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_restart(char *cmd) +{ + local_irq_disable(); + do_kernel_restart(cmd); + asm volatile ("bkpt"); +} diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile new file mode 100644 index 000000000..1c7c6e6cb --- /dev/null +++ b/arch/csky/kernel/probes/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o +obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o + +CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c new file mode 100644 index 000000000..bbc4edc25 --- /dev/null +++ b/arch/csky/kernel/probes/decode-insn.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kernel.h> +#include <linux/kprobes.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <asm/sections.h> + +#include "decode-insn.h" +#include "simulate-insn.h" + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 
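+ * INSN_GOOD          If instruction is supported and uses its slot.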
+ */
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+	probe_opcode_t insn = le32_to_cpu(*addr);
+
+	CSKY_INSN_SET_SIMULATE(br16, insn);
+	CSKY_INSN_SET_SIMULATE(bt16, insn);
+	CSKY_INSN_SET_SIMULATE(bf16, insn);
+	CSKY_INSN_SET_SIMULATE(jmp16, insn);
+	CSKY_INSN_SET_SIMULATE(jsr16, insn);
+	CSKY_INSN_SET_SIMULATE(lrw16, insn);
+	CSKY_INSN_SET_SIMULATE(pop16, insn);
+
+	CSKY_INSN_SET_SIMULATE(br32, insn);
+	CSKY_INSN_SET_SIMULATE(bt32, insn);
+	CSKY_INSN_SET_SIMULATE(bf32, insn);
+	CSKY_INSN_SET_SIMULATE(jmp32, insn);
+	CSKY_INSN_SET_SIMULATE(jsr32, insn);
+	CSKY_INSN_SET_SIMULATE(lrw32, insn);
+	CSKY_INSN_SET_SIMULATE(pop32, insn);
+
+	CSKY_INSN_SET_SIMULATE(bez32, insn);
+	CSKY_INSN_SET_SIMULATE(bnez32, insn);
+	CSKY_INSN_SET_SIMULATE(bnezad32, insn);
+	CSKY_INSN_SET_SIMULATE(bhsz32, insn);
+	CSKY_INSN_SET_SIMULATE(bhz32, insn);
+	CSKY_INSN_SET_SIMULATE(blsz32, insn);
+	CSKY_INSN_SET_SIMULATE(blz32, insn);
+	CSKY_INSN_SET_SIMULATE(bsr32, insn);
+	CSKY_INSN_SET_SIMULATE(jmpi32, insn);
+	CSKY_INSN_SET_SIMULATE(jsri32, insn);
+
+	return INSN_GOOD;
+}
diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h
new file mode 100644
index 000000000..9c4ad48fe
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+
+#include <asm/sections.h>
+#include <asm/kprobes.h>
+
+enum probe_insn {
+	INSN_REJECTED,
+	INSN_GOOD_NO_SLOT,
+	INSN_GOOD,
+};
+
+#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
+
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
new file mode 100644
index 000000000..5264763d0
--- /dev/null
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kprobes.h>
+
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	if (ftrace_location((unsigned long)p->addr))
+		p->flags |= KPROBE_FLAG_FTRACE;
+	return 0;
+}
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	bool lr_saver = false;
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+
+	/* Preempt is disabled by ftrace */
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (!p) {
+		p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+		if (unlikely(!p) || kprobe_disabled(p))
+			return;
+		lr_saver = true;
+	}
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+
+		if (lr_saver)
+			ip -= MCOUNT_INSN_SIZE;
+		instruction_pointer_set(regs, ip);
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate singlestep (and also recover regs->pc)
+			 * as if there is a nop
+			 */
+			instruction_pointer_set(regs,
+				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+		/*
+		 * If pre_handler returned !0, it changed regs->pc, so we
+		 * skipped emulating post_handler above.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
+	}
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.api.insn = NULL;
+	return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
new file mode 100644
index 000000000..79272dde7
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+struct csky_insn_patch {
+	kprobe_opcode_t *addr;
+	u32 opcode;
+	atomic_t cpu_count;
+};
+
+static int __kprobes patch_text_cb(void *priv)
+{
+	struct csky_insn_patch *param = priv;
+	unsigned int addr = (unsigned int)param->addr;
+
+	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+		*(u16 *) addr = cpu_to_le16(param->opcode);
+		dcache_wb_range(addr, addr + 2);
+		atomic_inc(&param->cpu_count);
+	} else {
+		while (atomic_read(&param->cpu_count) <= num_online_cpus())
+			cpu_relax();
+	}
+
+	icache_inv_range(addr, addr + 2);
+
+	return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+	struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
+
+	return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
+}
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+	p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+	patch_text(p->ainsn.api.insn, p->opcode);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+	p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (p->ainsn.api.handler)
+		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
+
+	post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	unsigned long probe_addr = (unsigned long)p->addr;
+
+	if (probe_addr & 0x1) {
+		pr_warn("Address not aligned.\n");
+		return -EINVAL;
+	}
+
+	/* copy instruction */
+	p->opcode = le32_to_cpu(*p->addr);
+
+	/* decode instruction */
+	switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
+	case INSN_REJECTED:	/* insn not supported */
+		return -EINVAL;
+
+	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
+		p->ainsn.api.insn = NULL;
+		break;
+
+	case INSN_GOOD:	/* instruction uses slot */
+		p->ainsn.api.insn = get_insn_slot();
+		if (!p->ainsn.api.insn)
+			return -ENOMEM;
+		break;
+	}
+
+	/* prepare the instruction */
+	if (p->ainsn.api.insn)
+		arch_prepare_ss_slot(p);
+	else
+		arch_prepare_simulate(p);
+
+	return 0;
+}
+
+/* install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	patch_text(p->addr, USR_BKPT);
+}
+
+/* remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	if (p->ainsn.api.insn) {
+		free_insn_slot(p->ainsn.api.insn, 0);
+		p->ainsn.api.insn = NULL;
+	}
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+	__this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, there is a chance of an
+ * interrupt occurring between exception return and the start of the
+ * out-of-line single-step, which would result in wrongly single-stepping
+ * into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+						 struct pt_regs *regs)
+{
+	kcb->saved_sr = regs->sr;
+	regs->sr &= ~BIT(6);
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+						    struct pt_regs *regs)
+{
+	regs->sr = kcb->saved_sr;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+	kcb->ss_ctx.ss_pending = true;
+	kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+	kcb->ss_ctx.ss_pending = false;
+	kcb->ss_ctx.match_addr = 0;
+}
+
+#define TRACE_MODE_SI		BIT(14)
+#define TRACE_MODE_MASK		~(0x3 << 14)
+#define TRACE_MODE_RUN		0
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+				       struct pt_regs *regs,
+				       struct kprobe_ctlblk *kcb, int reenter)
+{
+	unsigned long slot;
+
+	if (reenter) {
+		save_previous_kprobe(kcb);
+		set_current_kprobe(p);
+		kcb->kprobe_status = KPROBE_REENTER;
+	} else {
+		kcb->kprobe_status = KPROBE_HIT_SS;
+	}
+
+	if (p->ainsn.api.insn) {
+		/* prepare for single stepping */
+		slot = (unsigned long)p->ainsn.api.insn;
+
+		set_ss_context(kcb, slot, p);	/* mark pending ss */
+
+		/* IRQs and single stepping do not mix well. */
+		kprobes_save_local_irqflag(kcb, regs);
+		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+		instruction_pointer_set(regs, slot);
+	} else {
+		/* insn simulation */
+		arch_simulate_insn(p, regs);
+	}
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+				    struct pt_regs *regs,
+				    struct kprobe_ctlblk *kcb)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+	case KPROBE_HIT_ACTIVE:
+		kprobes_inc_nmissed_count(p);
+		setup_singlestep(p, regs, kcb, 1);
+		break;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		pr_warn("Unrecoverable kprobe detected.\n");
+		dump_kprobe(p);
+		BUG();
+		break;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+
+	return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+
+	if (!cur)
+		return;
+
+	/* return addr restore if non-branching insn */
+	if (cur->ainsn.api.restore != 0)
+		regs->pc = cur->ainsn.api.restore;
+
+	/* restore back original saved kprobe variables and continue */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		return;
+	}
+
+	/* call post handler */
+	kcb->kprobe_status = KPROBE_HIT_SSDONE;
+	if (cur->post_handler) {
+		/* post_handler can hit a breakpoint and single-step
+		 * again, so we enable the D-flag for the recursive
+		 * exception.
+		 */
+		cur->post_handler(cur, regs, 0);
+	}
+
+	reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe and the ip points back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->pc = (unsigned long) cur->addr;
+		if (!instruction_pointer(regs))
+			BUG();
+
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
+
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting; the
+		 * npre/npostfault counts could also be used to account
+		 * for these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page fault; this could happen if
+		 * the handler tries to access user space via
+		 * copy_from_user(), get_user(), etc. Let the
+		 * user-specified handler try to fix it first.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+			return 1;
+
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		if (fixup_exception(regs))
+			return 1;
+	}
+	return 0;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+	struct kprobe *p, *cur_kprobe;
+	struct kprobe_ctlblk *kcb;
+	unsigned long addr = instruction_pointer(regs);
+
+	kcb = get_kprobe_ctlblk();
+	cur_kprobe = kprobe_running();
+
+	p = get_kprobe((kprobe_opcode_t *) addr);
+
+	if (p) {
+		if (cur_kprobe) {
+			if (reenter_kprobe(p, regs, kcb))
+				return 1;
+		} else {
+			/* Probe hit */
+			set_current_kprobe(p);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it will
+			 * have modified the execution path, so there is
+			 * no need for single-stepping. Just reset the
+			 * current kprobe and exit.
+			 *
+			 * pre_handler can itself hit a breakpoint and
+			 * single-step through before returning.
+			 */
+			if (!p->pre_handler || !p->pre_handler(p, regs))
+				setup_singlestep(p, regs, kcb, 0);
+			else
+				reset_current_kprobe();
+		}
+		return 1;
+	}
+
+	/*
+	 * The breakpoint instruction was removed right
+	 * after we hit it. Another cpu has removed
+	 * either a probepoint or a debugger breakpoint
+	 * at this address. In either case, no further
+	 * handling of this interrupt is appropriate.
+	 * Return to the original instruction and continue.
+	 */
+	return 0;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if ((kcb->ss_ctx.ss_pending)
+	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+		clear_ss_context(kcb);	/* clear pending ss */
+
+		kprobes_restore_local_irqflag(kcb, regs);
+		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+		post_kprobe_handler(kcb, regs);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
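+ *
+ * Note: only the irq entry text range is added below; the generic kprobes
+ * core already rejects probes placed outside the kernel text.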
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+	int ret;
+
+	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+					(unsigned long)__irqentry_text_end);
+	return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+				      struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+	ri->fp = NULL;
+	regs->lr = (unsigned long) &kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+	return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000..b1fe3af24
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#include <linux/linkage.h>
+
+#include <abi/entry.h>
+
+ENTRY(kretprobe_trampoline)
+	SAVE_REGS_FTRACE
+
+	mov	a0, sp /* pt_regs */
+
+	jbsr	trampoline_probe_handler
+
+	/* use the result as the return-address */
+	mov	lr, a0
+
+	RESTORE_REGS_FTRACE
+	rts
+ENDPROC(kretprobe_trampoline)
diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000..4e464fed5
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
+					 unsigned long index,
+					 unsigned long *ptr)
+{
+	/* Return true for every readable index, not only 14/15/31. */
+	if (index < 14) {
+		*ptr = *(&regs->a0 + index);
+		return true;
+	}
+
+	if (index > 15 && index < 31) {
+		*ptr = *(&regs->exregs[0] + index - 16);
+		return true;
+	}
+
+	switch (index) {
+	case 14:
+		*ptr = regs->usp;
+		break;
+	case 15:
+		*ptr = regs->lr;
+		break;
+	case 31:
+		*ptr = regs->tls;
+		break;
+	default:
+		goto fail;
+	}
+
+	return true;
+fail:
+	return false;
+}
+
+static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
+					 unsigned long index,
+					 unsigned long val)
+{
+	/* Same structure as above: report success for all valid indexes. */
+	if (index < 14) {
+		*(&regs->a0 + index) = val;
+		return true;
+	}
+
+	if (index > 15 && index < 31) {
+		*(&regs->exregs[0] + index - 16) = val;
+		return true;
+	}
+
+	switch (index) {
+	case 14:
+		regs->usp = val;
+		break;
+	case 15:
+		regs->lr = val;
+		break;
+	case 31:
+		regs->tls = val;
+		break;
+	default:
+		goto fail;
+	}
+
+	return true;
+fail:
+	return false;
+}
+
+void __kprobes
+simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
+{
+	instruction_pointer_set(regs,
+		addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+}
+
+void __kprobes
+simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	instruction_pointer_set(regs,
+		addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+}
+
+void __kprobes
+simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
+{
+	if (regs->sr & 1)
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+	else
+		instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	if (regs->sr & 1)
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	else
+		instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
+{
+	if (!(regs->sr & 1))
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0x3ff)
<< 1, 9)); + else + instruction_pointer_set(regs, addr + 2); +} + +void __kprobes +simulate_bf32(u32 opcode, long addr, struct pt_regs *regs) +{ + if (!(regs->sr & 1)) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 2; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 4; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long tmp = (opcode & 0x300) >> 3; + unsigned long offset = ((opcode & 0x1f) | tmp) << 2; + + tmp = (opcode & 0xe0) >> 5; + + val = *(unsigned int *)(instruction_pointer(regs) + offset); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = (opcode & 0xffff0000) >> 14; + unsigned long tmp = opcode & 0x0000001f; + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_pop16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < (opcode & 0xf); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x10) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_pop32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x100000) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) { + csky_insn_reg_set_val(regs, i + 16, *tmp); + tmp += 1; + } + + if (opcode & 0x1000000) { + csky_insn_reg_set_val(regs, 29, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_bez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp == 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp != 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) 
>> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp = opcode & 0x1f;
+	unsigned long val;
+
+	csky_insn_reg_get_val(regs, tmp, &val);
+
+	val -= 1;
+
+	if (val > 0) {
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+
+	csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp = opcode & 0x1f;
+	unsigned long val;
+
+	csky_insn_reg_get_val(regs, tmp, &val);
+
+	/* bhsz is a signed compare; val is unsigned, so cast before testing */
+	if ((long)val >= 0) {
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+
+	csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp = opcode & 0x1f;
+	unsigned long val;
+
+	csky_insn_reg_get_val(regs, tmp, &val);
+
+	/* signed compare, as above */
+	if ((long)val > 0) {
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+
+	csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp = opcode & 0x1f;
+	unsigned long val;
+
+	csky_insn_reg_get_val(regs, tmp, &val);
+
+	/* signed compare, as above */
+	if ((long)val <= 0) {
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+
+	csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp = opcode & 0x1f;
+	unsigned long val;
+
+	csky_insn_reg_get_val(regs, tmp, &val);
+
+	/* signed compare, as above */
+	if ((long)val < 0) {
+		instruction_pointer_set(regs,
+			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else
+		instruction_pointer_set(regs, addr + 4);
+
+	csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long tmp;
+
+	tmp = (opcode & 0xffff) << 16;
+	tmp |= (opcode & 0xffff0000) >> 16;
+
+	instruction_pointer_set(regs,
+		addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
+
+	regs->lr = addr + 4;
+}
+
+void __kprobes
+simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long val;
+	unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+	val = *(unsigned int *)
+		((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+	instruction_pointer_set(regs, val);
+}
+
+void __kprobes
+simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
+{
+	unsigned long val;
+	unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+	val = *(unsigned int *)
+		((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+	regs->lr = addr + 4;
+
+	instruction_pointer_set(regs, val);
+}
diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000..ba4cb7ef0
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+
+#define __CSKY_INSN_FUNCS(name, mask, val)				\
+static __always_inline bool csky_insn_is_##name(probe_opcode_t code)	\
+{									\
+	BUILD_BUG_ON(~(mask) & (val));					\
+	return (code & (mask)) == (val);				\
+}									\
+void simulate_##name(u32 opcode, long addr, struct pt_regs
*regs); + +#define CSKY_INSN_SET_SIMULATE(name, code) \ + do { \ + if (csky_insn_is_##name(code)) { \ + api->handler = simulate_##name; \ + return INSN_GOOD_NO_SLOT; \ + } \ + } while (0) + +__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400) +__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800) +__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00) +__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800) +__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801) +__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000) +__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480) + +__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800) +__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860) +__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840) +__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0) +__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0) +__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80) +__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0) + +__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900) +__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920) +__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820) +__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0) +__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940) +__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960) +__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980) +__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) +__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0) +__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0) + +#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */ diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c new file mode 100644 index 000000000..1a9e0961b --- /dev/null +++ b/arch/csky/kernel/probes/uprobes.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com> + */ +#include <linux/highmem.h> +#include <linux/ptrace.h> +#include <linux/uprobes.h> +#include <asm/cacheflush.h> + +#include "decode-insn.h" + +#define UPROBE_TRAP_NR UINT_MAX + +bool is_swbp_insn(uprobe_opcode_t *insn) +{ + return (*insn & 0xffff) == UPROBE_SWBP_INSN; +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t insn; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + + auprobe->insn_size = is_insn32(insn) ? 
4 : 2;
+
+	switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
+	case INSN_REJECTED:
+		return -EINVAL;
+
+	case INSN_GOOD_NO_SLOT:
+		auprobe->simulate = true;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	utask->autask.saved_trap_no = current->thread.trap_no;
+	current->thread.trap_no = UPROBE_TRAP_NR;
+
+	instruction_pointer_set(regs, utask->xol_vaddr);
+
+	user_enable_single_step(current);
+
+	return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+
+	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+	user_disable_single_step(current);
+
+	return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+	if (t->thread.trap_no != UPROBE_TRAP_NR)
+		return true;
+
+	return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	probe_opcode_t insn;
+	unsigned long addr;
+
+	if (!auprobe->simulate)
+		return false;
+
+	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+	addr = instruction_pointer(regs);
+
+	if (auprobe->api.handler)
+		auprobe->api.handler(insn, addr, regs);
+
+	return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	/*
+	 * Task has received a fatal signal, so reset back to the probed
+	 * address.
+	 */
+	instruction_pointer_set(regs, utask->vaddr);
+
+	user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+			     struct pt_regs *regs)
+{
+	if (ctx == RP_CHECK_CHAIN_CALL)
+		return regs->usp <= ret->stack;
+	else
+		return regs->usp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+				  struct pt_regs *regs)
+{
+	unsigned long ra;
+
+	ra = regs->lr;
+
+	regs->lr = trampoline_vaddr;
+
+	return ra;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+				 unsigned long val, void *data)
+{
+	return NOTIFY_DONE;
+}
+
+int uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+	if (uprobe_pre_sstep_notifier(regs))
+		return 1;
+
+	return 0;
+}
+
+int uprobe_single_step_handler(struct pt_regs *regs)
+{
+	if (uprobe_post_sstep_notifier(regs))
+		return 1;
+
+	return 0;
+}
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
new file mode 100644
index 000000000..3d0ca22cd
--- /dev/null
+++ b/arch/csky/kernel/process.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/debug.h>
+#include <linux/delay.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+#include <asm/elf.h>
+#include <abi/reg_ops.h>
+
+struct cpuinfo_csky cpu_data[NR_CPUS];
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void){}
+
+int copy_thread(unsigned long clone_flags,
+		unsigned long usp,
+		unsigned long kthread_arg,
+		struct task_struct *p,
+		unsigned long tls)
+{
+	struct switch_stack *childstack;
+	struct pt_regs *childregs = task_pt_regs(p);
+
+#ifdef CONFIG_CPU_HAS_FPU
+	save_to_user_fp(&p->thread.user_fp);
+#endif
+
+	childstack = ((struct switch_stack *) childregs) - 1;
+	memset(childstack, 0, sizeof(struct switch_stack));
+
+	/* setup thread.sp for switch_to !!! */
+	p->thread.sp = (unsigned long)childstack;
+
+	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childstack->r15 = (unsigned long) ret_from_kernel_thread;
+		childstack->r10 = kthread_arg;
+		childstack->r9 = usp;
+		childregs->sr = mfcr("psr");
+	} else {
+		*childregs = *(current_pt_regs());
+		if (usp)
+			childregs->usp = usp;
+		if (clone_flags & CLONE_SETTLS)
+			task_thread_info(p)->tp_value = childregs->tls
+						      = tls;
+
+		childregs->a0 = 0;
+		childstack->r15 = (unsigned long) ret_from_fork;
+	}
+
+	return 0;
+}
+
+/* Fill in the fpu structure for a core dump. */
+int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
+{
+	memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
+	return 1;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
+{
+	struct pt_regs *regs = task_pt_regs(tsk);
+
+	/* NOTE: usp is error value. */
+	ELF_CORE_COPY_REGS((*pr_regs), regs)
+
+	return 1;
+}
+
+#ifndef CONFIG_CPU_PM_NONE
+void arch_cpu_idle(void)
+{
+#ifdef CONFIG_CPU_PM_WAIT
+	asm volatile("wait\n");
+#endif
+
+#ifdef CONFIG_CPU_PM_DOZE
+	asm volatile("doze\n");
+#endif
+
+#ifdef CONFIG_CPU_PM_STOP
+	asm volatile("stop\n");
+#endif
+	raw_local_irq_enable();
+}
+#endif
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
new file mode 100644
index 000000000..ac07695bc
--- /dev/null
+++ b/arch/csky/kernel/ptrace.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/audit.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+#include <abi/regdef.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/* sets the trace bits. */
+#define TRACE_MODE_SI		(1 << 14)
+#define TRACE_MODE_RUN		0
+#define TRACE_MODE_MASK		~(0x3 << 14)
+
+/*
+ * Make sure the single step bit is not set.
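+ * (The trace-mode field is bits 14-15 of the PSR: TRACE_MODE_SI selects
+ * single-step, TRACE_MODE_RUN clears it. PSR bit 6 is the interrupt-enable
+ * bit toggled below.)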
+ */
+static void singlestep_disable(struct task_struct *tsk)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(tsk);
+	regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+	/* Enable irq */
+	regs->sr |= BIT(6);
+}
+
+static void singlestep_enable(struct task_struct *tsk)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(tsk);
+	regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+
+	/* Disable irq */
+	regs->sr &= ~BIT(6);
+}
+
+/*
+ * Make sure the single step bit is set.
+ */
+void user_enable_single_step(struct task_struct *child)
+{
+	singlestep_enable(child);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	singlestep_disable(child);
+}
+
+enum csky_regset {
+	REGSET_GPR,
+	REGSET_FPR,
+};
+
+static int gpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   struct membuf to)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	/* Abiv1 regs->tls is fake and we need sync here. */
+	regs->tls = task_thread_info(target)->tp_value;
+
+	return membuf_write(&to, regs, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct pt_regs regs;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+	if (ret)
+		return ret;
+
+	/* BIT(0) of regs.sr is Condition Code/Carry bit */
+	regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
+#ifdef CONFIG_CPU_HAS_HILO
+	regs.dcsr = task_pt_regs(target)->dcsr;
+#endif
+	task_thread_info(target)->tp_value = regs.tls;
+
+	*task_pt_regs(target) = regs;
+
+	return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   struct membuf to)
+{
+	struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+	int i;
+	struct user_fp tmp = *regs;
+
+	for (i = 0; i < 16; i++) {
+		tmp.vr[i*4] = regs->vr[i*2];
+		tmp.vr[i*4 + 1] = regs->vr[i*2 + 1];
+	}
+
+	for (i = 0; i < 32; i++)
+		tmp.vr[64 + i] = regs->vr[32 + i];
+
+	return membuf_write(&to, &tmp, sizeof(tmp));
+#else
+	return membuf_write(&to, regs, sizeof(*regs));
+#endif
+}
+
+static int fpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+	int i;
+	struct user_fp tmp;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1);
+
+	*regs = tmp;
+
+	for (i = 0; i < 16; i++) {
+		regs->vr[i*2] = tmp.vr[i*4];
+		regs->vr[i*2 + 1] = tmp.vr[i*4 + 1];
+	}
+
+	for (i = 0; i < 32; i++)
+		regs->vr[32 + i] = tmp.vr[64 + i];
+#else
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+#endif
+
+	return ret;
+}
+
+static const struct user_regset csky_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = sizeof(struct pt_regs) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.regset_get = gpr_get,
+		.set = gpr_set,
+	},
+	[REGSET_FPR] = {
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct user_fp) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.regset_get = fpr_get,
+		.set = fpr_set,
+	},
+};
+
+static const struct user_regset_view user_csky_view = {
+	.name = "csky",
+	.e_machine = ELF_ARCH,
+	.regsets = csky_regsets,
+	.n =
ARRAY_SIZE(csky_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_csky_view; +} + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(tls), + REG_OFFSET_NAME(lr), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(sr), + REG_OFFSET_NAME(usp), + REG_OFFSET_NAME(orig_a0), + REG_OFFSET_NAME(a0), + REG_OFFSET_NAME(a1), + REG_OFFSET_NAME(a2), + REG_OFFSET_NAME(a3), + REG_OFFSET_NAME(regs[0]), + REG_OFFSET_NAME(regs[1]), + REG_OFFSET_NAME(regs[2]), + REG_OFFSET_NAME(regs[3]), + REG_OFFSET_NAME(regs[4]), + REG_OFFSET_NAME(regs[5]), + REG_OFFSET_NAME(regs[6]), + REG_OFFSET_NAME(regs[7]), + REG_OFFSET_NAME(regs[8]), + REG_OFFSET_NAME(regs[9]), +#if defined(__CSKYABIV2__) + REG_OFFSET_NAME(exregs[0]), + REG_OFFSET_NAME(exregs[1]), + REG_OFFSET_NAME(exregs[2]), + REG_OFFSET_NAME(exregs[3]), + REG_OFFSET_NAME(exregs[4]), + REG_OFFSET_NAME(exregs[5]), + REG_OFFSET_NAME(exregs[6]), + REG_OFFSET_NAME(exregs[7]), + REG_OFFSET_NAME(exregs[8]), + REG_OFFSET_NAME(exregs[9]), + REG_OFFSET_NAME(exregs[10]), + REG_OFFSET_NAME(exregs[11]), + REG_OFFSET_NAME(exregs[12]), + REG_OFFSET_NAME(exregs[13]), + REG_OFFSET_NAME(exregs[14]), + REG_OFFSET_NAME(rhi), + REG_OFFSET_NAME(rlo), + REG_OFFSET_NAME(dcsr), +#endif + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
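+ * For example, the kprobe event parser relies on this helper to fetch
+ * $stackN arguments.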
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +void ptrace_disable(struct task_struct *child) +{ + singlestep_disable(child); +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + long ret = -EIO; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (tracehook_report_syscall_entry(regs)) + return -1; + + if (secure_computing() == -1) + return -1; + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, syscall_get_nr(current, regs)); + + audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3); + return 0; +} + +asmlinkage void syscall_trace_exit(struct pt_regs *regs) +{ + audit_syscall_exit(regs); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_exit(regs, syscall_get_return_value(current, regs)); +} + +void show_regs(struct pt_regs *fp) +{ + pr_info("\nCURRENT PROCESS:\n\n"); + pr_info("COMM=%s PID=%d\n", current->comm, current->pid); + + if (current->mm) { + pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", + (int) current->mm->start_code, + (int) current->mm->end_code, + (int) current->mm->start_data, + (int) current->mm->end_data, + (int) current->mm->end_data, + (int) current->mm->brk); + pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n", + (int) current->mm->start_stack, + (int) (((unsigned long) current) + 2 * PAGE_SIZE)); + } + + pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); + pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); + pr_info("SP: 0x%08lx\n", (long)fp); + pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("PSR: 0x%08lx\n", (long)fp->sr); + + pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", + fp->a0, fp->a1, fp->a2, fp->a3); +#if defined(__CSKYABIV2__) + pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", + fp->regs[8], fp->regs[9], fp->lr); + pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", + fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); + pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", + fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); + pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", + fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]); + pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", + fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); + pr_info(" hi: 0x%08lx lo: 0x%08lx\n", + fp->rhi, fp->rlo); +#else + pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r14: 0x%08lx r1: 0x%08lx\n", + fp->regs[8], fp->regs[9]); +#endif + + return; +} diff --git a/arch/csky/kernel/setup.c 
b/arch/csky/kernel/setup.c new file mode 100644 index 000000000..e4cab1605 --- /dev/null +++ b/arch/csky/kernel/setup.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/console.h> +#include <linux/memblock.h> +#include <linux/initrd.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/start_kernel.h> +#include <linux/dma-map-ops.h> +#include <linux/screen_info.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#include <asm/pgalloc.h> + +#ifdef CONFIG_DUMMY_CONSOLE +struct screen_info screen_info = { + .orig_video_lines = 30, + .orig_video_cols = 80, + .orig_video_mode = 0, + .orig_video_ega_bx = 0, + .orig_video_isVGA = 1, + .orig_video_points = 8 +}; +#endif + +static void __init csky_memblock_init(void) +{ + unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; + signed long size; + + memblock_reserve(__pa(_stext), _end - _stext); + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + + memblock_dump_all(); + + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + size = max_pfn - min_low_pfn; + + if (size >= lowmem_size) { + max_low_pfn = min_low_pfn + lowmem_size; + write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); + } else if (size > sseg_size) { + max_low_pfn = min_low_pfn + sseg_size; + } + + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; + +#ifdef CONFIG_HIGHMEM + max_zone_pfn[ZONE_HIGHMEM] = max_pfn; + + highstart_pfn = max_low_pfn; + highend_pfn = max_pfn; +#endif + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + dma_contiguous_reserve(0); + + free_area_init(max_zone_pfn); +} + +void __init setup_arch(char **cmdline_p) +{ + *cmdline_p = boot_command_line; + + console_verbose(); + + pr_info("Phys. 
mem: %ldMB\n", + (unsigned long) memblock_phys_mem_size()/1024/1024); + + init_mm.start_code = (unsigned long) _stext; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = (unsigned long) _end; + + parse_early_param(); + + csky_memblock_init(); + + unflatten_and_copy_device_tree(); + +#ifdef CONFIG_SMP + setup_smp(); +#endif + + sparse_init(); + + fixaddr_init(); + +#ifdef CONFIG_HIGHMEM + kmap_init(); +#endif +} + +unsigned long va_pa_offset; +EXPORT_SYMBOL(va_pa_offset); + +asmlinkage __visible void __init csky_start(unsigned int unused, + void *dtb_start) +{ + /* Clean up bss section */ + memset(__bss_start, 0, __bss_stop - __bss_start); + + va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1); + + pre_trap_init(); + pre_mmu_init(); + + if (dtb_start == NULL) + early_init_dt_scan(__dtb_start); + else + early_init_dt_scan(dtb_start); + + start_kernel(); + + asm volatile("br .\n"); +} diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c new file mode 100644 index 000000000..f7c1677e5 --- /dev/null +++ b/arch/csky/kernel/signal.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/signal.h> +#include <linux/uaccess.h> +#include <linux/syscalls.h> +#include <linux/tracehook.h> + +#include <asm/traps.h> +#include <asm/ucontext.h> +#include <asm/vdso.h> + +#include <abi/regdef.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +static int restore_fpu_state(struct sigcontext __user *sc) +{ + int err = 0; + struct user_fp user_fp; + + err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); + + restore_from_user_fp(&user_fp); + + return err; +} + +static int save_fpu_state(struct sigcontext __user *sc) +{ + struct user_fp user_fp; + + save_to_user_fp(&user_fp); + + return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); +} +#else +#define restore_fpu_state(sigcontext) (0) +#define save_fpu_state(sigcontext) (0) +#endif + +struct rt_sigframe { + /* + * pad[3] is compatible with the same struct defined in + * gcc/libgcc/config/csky/linux-unwind.h + */ + int pad[3]; + struct siginfo info; + struct ucontext uc; +}; + +static long restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) +{ + int err = 0; + unsigned long sr = regs->sr; + + /* sc_pt_regs is structured the same as the start of pt_regs */ + err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs)); + + /* BIT(0) of regs->sr is Condition Code/Carry bit */ + regs->sr = (sr & ~1) | (regs->sr & 1); + + /* Restore the floating-point state. 
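+	 * (A no-op stub returning 0 when !CONFIG_CPU_HAS_FPU; see the
+	 * definitions above.)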
*/ + err |= restore_fpu_state(sc); + + return err; +} + +SYSCALL_DEFINE0(rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame; + sigset_t set; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + frame = (struct rt_sigframe __user *)regs->usp; + + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->a0; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +static int setup_sigcontext(struct rt_sigframe __user *frame, + struct pt_regs *regs) +{ + struct sigcontext __user *sc = &frame->uc.uc_mcontext; + int err = 0; + + err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); + err |= save_fpu_state(sc); + + return err; +} + +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, size_t framesize) +{ + unsigned long sp; + /* Default to using normal stack */ + sp = regs->usp; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) + return (void __user __force *)(-1UL); + + /* This is the X/Open sanctioned signal stack switching. */ + sp = sigsp(sp, ksig) - framesize; + + /* Align the stack frame. */ + sp &= -8UL; + + return (void __user *)sp; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0; + struct csky_vdso *vdso = current->mm->context.vdso; + + frame = get_sigframe(ksig, regs, sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->usp); + err |= setup_sigcontext(frame, regs); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* Set up to return from userspace. */ + regs->lr = (unsigned long)(vdso->rt_signal_retcode); + + /* + * Set up registers for signal handler. + * Registers that we don't modify keep the value they had from + * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). + */ + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + regs->usp = (unsigned long)frame; + regs->a0 = ksig->sig; /* a0: signal number */ + regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */ + regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */ + + return 0; +} + +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + /* Are we from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* If so, check system call restarting.. 
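+		 * regs->a0 holds the syscall return value at this point, so
+		 * the -ERESTART* codes below are matched against it.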
*/ + switch (regs->a0) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->a0 = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { + regs->a0 = -EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* Set up the stack frame */ + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +static void do_signal(struct pt_regs *regs) +{ + struct ksignal ksig; + + if (get_signal(&ksig)) { + /* Actually deliver the signal */ + handle_signal(&ksig, regs); + return; + } + + /* Did we come from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* Restart the system call - no handlers present */ + switch (regs->a0) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + case -ERESTART_RESTARTBLOCK: + regs->a0 = regs->orig_a0; + regs_syscallid(regs) = __NR_restart_syscall; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* + * If there is no signal to deliver, we just put the saved + * sigmask back. + */ + restore_saved_sigmask(); +} + +/* + * notification of userspace execution resumption + * - triggered by the _TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, + unsigned long thread_info_flags) +{ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + /* Handle pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_info_flags & _TIF_NOTIFY_RESUME) { + tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); + } +} diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c new file mode 100644 index 000000000..1a8d7eaf1 --- /dev/null +++ b/arch/csky/kernel/smp.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/irq_work.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/seq_file.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <asm/irq.h> +#include <asm/traps.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +enum ipi_message_type { + IPI_EMPTY, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_IRQ_WORK, + IPI_MAX +}; + +struct ipi_data_struct { + unsigned long bits ____cacheline_aligned; + unsigned long stats[IPI_MAX] ____cacheline_aligned; +}; +static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data); + +static irqreturn_t handle_ipi(int irq, void *dev) +{ + unsigned long *stats = this_cpu_ptr(&ipi_data)->stats; + + while (true) { + unsigned long ops; + + ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); + if (ops == 0) + return IRQ_HANDLED; + + if (ops & (1 << IPI_RESCHEDULE)) { + stats[IPI_RESCHEDULE]++; + scheduler_ipi(); + } + + if (ops & (1 << IPI_CALL_FUNC)) { + stats[IPI_CALL_FUNC]++; + generic_smp_call_function_interrupt(); + } + + if (ops & (1 << IPI_IRQ_WORK)) { + stats[IPI_IRQ_WORK]++; + irq_work_run(); + } + + BUG_ON((ops >> IPI_MAX) != 0); + } + + return 
IRQ_HANDLED; +} + +static void (*send_arch_ipi)(const struct cpumask *mask); + +static int ipi_irq; +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq) +{ + if (send_arch_ipi) + return; + + send_arch_ipi = func; + ipi_irq = irq; +} + +static void +send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + for_each_cpu(i, to_whom) + set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits); + + smp_mb(); + send_arch_ipi(to_whom); +} + +static const char * const ipi_names[] = { + [IPI_EMPTY] = "Empty interrupts", + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_IRQ_WORK] = "Irq work interrupts", +}; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < IPI_MAX; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, + prec >= 4 ? " " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10lu ", + per_cpu_ptr(&ipi_data, cpu)->stats[i]); + seq_printf(p, " %s\n", ipi_names[i]); + } + + return 0; +} + +void arch_send_call_function_ipi_mask(struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_stop(void *unused) +{ + while (1); +} + +void smp_send_stop(void) +{ + on_each_cpu(ipi_stop, NULL, 1); +} + +void smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); +} +#endif + +void __init smp_prepare_boot_cpu(void) +{ +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ +} + +static int ipi_dummy_dev; + +void __init setup_smp_ipi(void) +{ + int rc; + + if (ipi_irq == 0) + return; + + rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", + &ipi_dummy_dev); + if (rc) + panic("%s IRQ request failed\n", __func__); + + enable_percpu_irq(ipi_irq, 0); +} + +void __init setup_smp(void) +{ + struct device_node *node = NULL; + int cpu; + + for_each_of_cpu_node(node) { + if (!of_device_is_available(node)) + continue; + + if (of_property_read_u32(node, "reg", &cpu)) + continue; + + if (cpu >= NR_CPUS) + continue; + + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + } +} + +extern void _start_smp_secondary(void); + +volatile unsigned int secondary_hint; +volatile unsigned int secondary_hint2; +volatile unsigned int secondary_ccr; +volatile unsigned int secondary_stack; + +unsigned long secondary_msa1; + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + unsigned long mask = 1 << cpu; + + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; + secondary_hint = mfcr("cr31"); + secondary_hint2 = mfcr("cr<21, 1>"); + secondary_ccr = mfcr("cr18"); + secondary_msa1 = read_mmu_msa1(); + + /* + * Because other CPUs are in reset status, we must flush data + * from cache to out and secondary CPUs use them in + * csky_start_secondary(void) + */ + mtcr("cr17", 0x22); + + if (mask & mfcr("cr<29, 0>")) { + send_arch_ipi(cpumask_of(cpu)); + } else { + /* Enable cpu in SMP reset ctrl reg */ + mask |= mfcr("cr<29, 0>"); + mtcr("cr<29, 0>", mask); + } + + /* Wait for the cpu online */ + while (!cpu_online(cpu)); + + secondary_stack = 0; + + return 0; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +void 
csky_start_secondary(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + mtcr("cr31", secondary_hint); + mtcr("cr<21, 1>", secondary_hint2); + mtcr("cr18", secondary_ccr); + + mtcr("vbr", vec_base); + + flush_tlb_all(); + write_mmu_pagemask(0); + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + + enable_percpu_irq(ipi_irq, 0); + + mmget(mm); + mmgrab(mm); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + + notify_cpu_starting(cpu); + set_cpu_online(cpu, true); + + pr_info("CPU%u Online: %s...\n", cpu, __func__); + + local_irq_enable(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + + irq_migrate_all_off_this_cpu(); + + clear_tasks_mm_cpumask(cpu); + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + if (!cpu_wait_death(cpu, 5)) { + pr_crit("CPU%u: shutdown failed\n", cpu); + return; + } + pr_notice("CPU%u: shutdown\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + + cpu_report_death(); + + while (!secondary_stack) + arch_cpu_idle(); + + local_irq_disable(); + + asm volatile( + "mov sp, %0\n" + "mov r8, %0\n" + "jmpi csky_start_secondary" + : + : "r" (secondary_stack)); +} +#endif diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c new file mode 100644 index 000000000..16ae20a0a --- /dev/null +++ b/arch/csky/kernel/stacktrace.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/stacktrace.h> +#include <linux/ftrace.h> +#include <linux/ptrace.h> + +#ifdef CONFIG_FRAME_POINTER + +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long fp, sp, pc; + + if (regs) { + fp = frame_pointer(regs); + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + const register unsigned long current_sp __asm__ ("sp"); + const register unsigned long current_fp __asm__ ("r8"); + fp = current_fp; + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + fp = thread_saved_fp(task); + sp = thread_saved_sp(task); + pc = thread_saved_lr(task); + } + + for (;;) { + unsigned long low, high; + struct stackframe *frame; + + if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) + break; + + /* Validate frame pointer */ + low = sp; + high = ALIGN(sp, THREAD_SIZE); + if (unlikely(fp < low || fp > high || fp & 0x3)) + break; + /* Unwind stack frame */ + frame = (struct stackframe *)fp; + sp = fp; + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, + (unsigned long *)(fp - 8)); + } +} + +#else /* !CONFIG_FRAME_POINTER */ + +static void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long sp, pc; + unsigned long *ksp; + + if (regs) { + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + const register unsigned long current_sp __asm__ ("sp"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + sp = thread_saved_sp(task); + pc 
= thread_saved_lr(task); + } + + if (unlikely(sp & 0x3)) + return; + + ksp = (unsigned long *)sp; + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) + break; + pc = (*ksp++) - 0x4; + } +} +#endif /* CONFIG_FRAME_POINTER */ + +static bool print_trace_address(unsigned long pc, void *arg) +{ + print_ip_sym((const char *)arg, pc); + return false; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_cont("Call Trace:\n"); + walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); +} + +static bool save_wchan(unsigned long pc, void *arg) +{ + if (!in_sched_functions(pc)) { + unsigned long *p = arg; + *p = pc; + return true; + } + return false; +} + +unsigned long get_wchan(struct task_struct *task) +{ + unsigned long pc = 0; + + if (likely(task && task != current && task->state != TASK_RUNNING)) + walk_stackframe(task, NULL, save_wchan, &pc); + return pc; +} + +#ifdef CONFIG_STACKTRACE +static bool __save_trace(unsigned long pc, void *arg, bool nosched) +{ + struct stack_trace *trace = arg; + + if (unlikely(nosched && in_sched_functions(pc))) + return false; + if (unlikely(trace->skip > 0)) { + trace->skip--; + return false; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +static bool save_trace(unsigned long pc, void *arg) +{ + return __save_trace(pc, arg, false); +} + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + walk_stackframe(tsk, NULL, save_trace, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(NULL, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif /* CONFIG_STACKTRACE */ diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c new file mode 100644 index 000000000..3d30e58a4 --- /dev/null +++ b/arch/csky/kernel/syscall.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> + +SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) +{ + struct thread_info *ti = task_thread_info(current); + struct pt_regs *reg = current_pt_regs(); + + reg->tls = addr; + ti->tp_value = addr; + + return 0; +} + +SYSCALL_DEFINE6(mmap2, + unsigned long, addr, + unsigned long, len, + unsigned long, prot, + unsigned long, flags, + unsigned long, fd, + off_t, offset) +{ + if (unlikely(offset & (~PAGE_MASK >> 12))) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, + offset >> (PAGE_SHIFT - 12)); +} + +/* + * for abiv1 the 64bits args should be even th, So we need mov the advice + * forward. + */ +SYSCALL_DEFINE4(csky_fadvise64_64, + int, fd, + int, advice, + loff_t, offset, + loff_t, len) +{ + return ksys_fadvise64_64(fd, offset, len, advice); +} diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c new file mode 100644 index 000000000..a0c238c53 --- /dev/null +++ b/arch/csky/kernel/syscall_table.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> +#include <asm/syscalls.h> + +#undef __SYSCALL +#define __SYSCALL(nr, call)[nr] = (call), + +#define sys_fadvise64_64 sys_csky_fadvise64_64 +void * const sys_call_table[__NR_syscalls] __page_aligned_data = { + [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, +#include <asm/unistd.h> +}; diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c new file mode 100644 index 000000000..52379d866 --- /dev/null +++ b/arch/csky/kernel/time.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/clocksource.h> +#include <linux/of_clk.h> + +void __init time_init(void) +{ + of_clk_init(NULL); + timer_probe(); +} diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c new file mode 100644 index 000000000..15711efa1 --- /dev/null +++ b/arch/csky/kernel/traps.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/user.h> +#include <linux/string.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <linux/ptrace.h> +#include <linux/kallsyms.h> +#include <linux/rtc.h> +#include <linux/uaccess.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/sched/debug.h> + +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/pgalloc.h> +#include <asm/siginfo.h> + +#include <asm/mmu_context.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +int show_unhandled_signals = 1; + +/* Defined in entry.S */ +asmlinkage void csky_trap(void); + +asmlinkage void csky_systemcall(void); +asmlinkage void csky_cmpxchg(void); +asmlinkage void csky_get_tls(void); +asmlinkage void csky_irq(void); + +asmlinkage void csky_tlbinvalidl(void); +asmlinkage void csky_tlbinvalids(void); +asmlinkage void csky_tlbmodified(void); + +/* Defined in head.S */ +asmlinkage void _start_smp_secondary(void); + +void __init pre_trap_init(void) +{ + int i; + + mtcr("vbr", vec_base); + + for (i = 1; i < 128; i++) + VEC_INIT(i, csky_trap); +} + +void __init trap_init(void) +{ + VEC_INIT(VEC_AUTOVEC, csky_irq); + + /* setup trap0 trap2 trap3 */ + VEC_INIT(VEC_TRAP0, csky_systemcall); + VEC_INIT(VEC_TRAP2, csky_cmpxchg); + VEC_INIT(VEC_TRAP3, csky_get_tls); + + /* setup MMU TLB exception */ + VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl); + VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids); + VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + +#ifdef CONFIG_SMP + mtcr("cr<28, 0>", virt_to_phys(vec_base)); + + VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary)); +#endif +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + int ret; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + print_modules(); + show_regs(regs); + show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO); + + ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + oops_exit(); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) +{ + struct task_struct *tsk = current; + + if (show_unhandled_signals && unhandled_signal(tsk, signo) + && printk_ratelimit()) { + pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx", + tsk->comm, 
task_pid_nr(tsk), signo, code, addr); + print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); + pr_cont("\n"); + show_regs(regs); + } + + force_sig_fault(signo, code, (void __user *)addr); +} + +static void do_trap_error(struct pt_regs *regs, int signo, int code, + unsigned long addr, const char *str) +{ + current->thread.trap_no = trap_no(regs); + + if (user_mode(regs)) { + do_trap(regs, signo, code, addr); + } else { + if (!fixup_exception(regs)) + die(regs, str); + } +} + +#define DO_ERROR_INFO(name, signo, code, str) \ +asmlinkage __visible void name(struct pt_regs *regs) \ +{ \ + do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \ +} + +DO_ERROR_INFO(do_trap_unknown, + SIGILL, ILL_ILLTRP, "unknown exception"); +DO_ERROR_INFO(do_trap_zdiv, + SIGFPE, FPE_INTDIV, "error zero div exception"); +DO_ERROR_INFO(do_trap_buserr, + SIGSEGV, ILL_ILLADR, "error bus error exception"); + +asmlinkage void do_trap_misaligned(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_NEED_SOFTALIGN + csky_alignment(regs); +#else + current->thread.trap_no = trap_no(regs); + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc, + "Oops - load/store address misaligned"); +#endif +} + +asmlinkage void do_trap_bkpt(struct pt_regs *regs) +{ +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_single_step_handler(regs)) + return; +#endif + if (user_mode(regs)) { + send_sig(SIGTRAP, current, 0); + return; + } + + do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc, + "Oops - illegal trap exception"); +} + +asmlinkage void do_trap_illinsn(struct pt_regs *regs) +{ + current->thread.trap_no = trap_no(regs); + +#ifdef CONFIG_KPROBES + if (kprobe_breakpoint_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_breakpoint_handler(regs)) + return; +#endif +#ifndef CONFIG_CPU_NO_USER_BKPT + if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) { + send_sig(SIGTRAP, current, 0); + return; + } +#endif + + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - illegal instruction exception"); +} + +asmlinkage void do_trap_fpe(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + return fpu_fpe(regs); +#else + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - fpu instruction exception"); +#endif +} + +asmlinkage void do_trap_priv(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + if (user_mode(regs) && fpu_libc_helper(regs)) + return; +#endif + do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc, + "Oops - illegal privileged exception"); +} + +asmlinkage void trap_c(struct pt_regs *regs) +{ + switch (trap_no(regs)) { + case VEC_ZERODIV: + do_trap_zdiv(regs); + break; + case VEC_TRACE: + do_trap_bkpt(regs); + break; + case VEC_ILLEGAL: + do_trap_illinsn(regs); + break; + case VEC_TRAP1: + case VEC_BREAKPOINT: + do_trap_bkpt(regs); + break; + case VEC_ACCESS: + do_trap_buserr(regs); + break; + case VEC_ALIGN: + do_trap_misaligned(regs); + break; + case VEC_FPE: + do_trap_fpe(regs); + break; + case VEC_PRIV: + do_trap_priv(regs); + break; + default: + do_trap_unknown(regs); + break; + } +} diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c new file mode 100644 index 000000000..abc3dbc65 --- /dev/null +++ b/arch/csky/kernel/vdso.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
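+
+/*
+ * The csky vDSO is a single kernel-allocated page whose only payload
+ * here is rt_signal_retcode: setup_rt_frame() points regs->lr at it,
+ * so a signal handler's return lands in a trampoline that issues
+ * rt_sigreturn.  Illustrative shape of that trampoline on abiv2 (the
+ * real bytes come from setup_vdso_page()):
+ *
+ *	movi	r7, __NR_rt_sigreturn
+ *	trap	0
+ */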
+ +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/unistd.h> +#include <linux/uaccess.h> + +#include <asm/vdso.h> +#include <asm/cacheflush.h> + +static struct page *vdso_page; + +static int __init init_vdso(void) +{ + struct csky_vdso *vdso; + int err = 0; + + vdso_page = alloc_page(GFP_KERNEL); + if (!vdso_page) + panic("Cannot allocate vdso"); + + vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); + if (!vdso) + panic("Cannot map vdso"); + + clear_page(vdso); + + err = setup_vdso_page(vdso->rt_signal_retcode); + if (err) + panic("Cannot set signal return code, err: %x.", err); + + dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16); + + vunmap(vdso); + + return 0; +} +subsys_initcall(init_vdso); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long addr; + struct mm_struct *mm = current->mm; + + mmap_write_lock(mm); + + addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping( + mm, + addr, + PAGE_SIZE, + VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_page); + if (ret) + goto up_fail; + + mm->context.vdso = (void *)addr; + +up_fail: + mmap_write_unlock(mm); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm == NULL) + return NULL; + + if (vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + else + return NULL; +} diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S new file mode 100644 index 000000000..f03033e17 --- /dev/null +++ b/arch/csky/kernel/vmlinux.lds.S @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> +#include <asm/memory.h> + +OUTPUT_ARCH(csky) +ENTRY(_start) + +#ifndef __cskyBE__ +jiffies = jiffies_64; +#else +jiffies = jiffies_64 + 4; +#endif + +#define VBR_BASE \ + . = ALIGN(1024); \ + vec_base = .; \ + . += 512; + +SECTIONS +{ + . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; + + _stext = .; + __init_begin = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(PAGE_SIZE) + PERCPU_SECTION(L1_CACHE_BYTES) + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = .; + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } = 0 + _etext = .; + + /* __init_begin __init_end must be page aligned for free_initmem */ + . = ALIGN(PAGE_SIZE); + + + _sdata = .; + RO_DATA(PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + +#ifdef CONFIG_HAVE_TCM + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + } + + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) + { + . = ALIGN(4); + __stcm_text_data = .; + *(.tcm.text) + *(.tcm.rodata) +#ifndef CONFIG_HAVE_DTCM + *(.tcm.data) +#endif + . = ALIGN(4); + __etcm_text_data = .; + } + + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); + +#ifdef CONFIG_HAVE_DTCM + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE + + .dtcm_start : { + __dtcm_start = .; + } + + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) + { + . = ALIGN(4); + __stcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __etcm_data = .; + } + + . 
= ADDR(.dtcm_start) + SIZEOF(.data_tcm); + + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { +#else + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { +#endif + . = ALIGN(PAGE_SIZE); + __tcm_end = .; + } +#endif + + EXCEPTION_TABLE(L1_CACHE_BYTES) + BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) + VBR_BASE + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/csky/lib/Makefile b/arch/csky/lib/Makefile new file mode 100644 index 000000000..7fbdbb2c4 --- /dev/null +++ b/arch/csky/lib/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +lib-y := usercopy.o delay.o +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/csky/lib/delay.c b/arch/csky/lib/delay.c new file mode 100644 index 000000000..22570b079 --- /dev/null +++ b/arch/csky/lib/delay.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> + +void __delay(unsigned long loops) +{ + asm volatile ( + "mov r0, r0\n" + "1:declt %0\n" + "bf 1b" + : "=r"(loops) + : "0"(loops)); +} +EXPORT_SYMBOL(__delay); + +void __const_udelay(unsigned long xloops) +{ + unsigned long long loops; + + loops = (unsigned long long)xloops * loops_per_jiffy * HZ; + + __delay(loops >> 32); +} +EXPORT_SYMBOL(__const_udelay); + +void __udelay(unsigned long usecs) +{ + __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long nsecs) +{ + __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/csky/lib/error-inject.c b/arch/csky/lib/error-inject.c new file mode 100644 index 000000000..c15fb36fe --- /dev/null +++ b/arch/csky/lib/error-inject.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/error-injection.h> +#include <linux/kprobes.h> + +void override_function_with_return(struct pt_regs *regs) +{ + instruction_pointer_set(regs, regs->lr); +} +NOKPROBE_SYMBOL(override_function_with_return); diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c new file mode 100644 index 000000000..3c9bd645e --- /dev/null +++ b/arch/csky/lib/usercopy.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/uaccess.h> +#include <linux/types.h> + +unsigned long raw_copy_from_user(void *to, const void *from, + unsigned long n) +{ + ___copy_from_user(to, from, n); + return n; +} +EXPORT_SYMBOL(raw_copy_from_user); + +unsigned long raw_copy_to_user(void *to, const void *from, + unsigned long n) +{ + ___copy_to_user(to, from, n); + return n; +} +EXPORT_SYMBOL(raw_copy_to_user); + + +/* + * copy a null terminated string from userspace. 
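+ *
+ * In spirit, the asm macro below is this C loop (illustrative sketch
+ * only):
+ *
+ *	res = count;
+ *	while (count && (*dst++ = *src++) != '\0')
+ *		count--;
+ *	res -= count;	(when the copy stopped at the NUL)
+ *
+ * plus an __ex_table entry so a faulting "ldb" at label 2 branches
+ * to label 4 and yields -EFAULT instead of an oops.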
+ */ +#define __do_strncpy_from_user(dst, src, count, res) \ +do { \ + int tmp; \ + long faultres; \ + asm volatile( \ + " cmpnei %3, 0 \n" \ + " bf 4f \n" \ + "1: cmpnei %1, 0 \n" \ + " bf 5f \n" \ + "2: ldb %4, (%3, 0) \n" \ + " stb %4, (%2, 0) \n" \ + " cmpnei %4, 0 \n" \ + " bf 3f \n" \ + " addi %3, 1 \n" \ + " addi %2, 1 \n" \ + " subi %1, 1 \n" \ + " br 1b \n" \ + "3: subu %0, %1 \n" \ + " br 5f \n" \ + "4: mov %0, %5 \n" \ + " br 5f \n" \ + ".section __ex_table, \"a\" \n" \ + ".align 2 \n" \ + ".long 2b, 4b \n" \ + ".previous \n" \ + "5: \n" \ + : "=r"(res), "=r"(count), "=r"(dst), \ + "=r"(src), "=r"(tmp), "=r"(faultres) \ + : "5"(-EFAULT), "0"(count), "1"(count), \ + "2"(dst), "3"(src) \ + : "memory", "cc"); \ +} while (0) + +/* + * __strncpy_from_user: - Copy a NUL terminated string from userspace, + * with less checking. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * Caller must check the specified block with access_ok() before calling + * this function. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +long __strncpy_from_user(char *dst, const char *src, long count) +{ + long res; + + __do_strncpy_from_user(dst, src, count, res); + return res; +} +EXPORT_SYMBOL(__strncpy_from_user); + +/* + * strncpy_from_user: - Copy a NUL terminated string from userspace. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. + */ +long strncpy_from_user(char *dst, const char *src, long count) +{ + long res = -EFAULT; + + if (access_ok(src, 1)) + __do_strncpy_from_user(dst, src, count, res); + return res; +} +EXPORT_SYMBOL(strncpy_from_user); + +/* + * strlen_user: - Get the size of a string in user space. + * @str: The string to measure. + * @n: The maximum valid length + * + * Get the size of a NUL-terminated string in user space. + * + * Returns the size of the string INCLUDING the terminating NUL. + * On exception, returns 0. + * If the string is too long, returns a value greater than @n. 
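+ *
+ * Typical use (illustrative):
+ *
+ *	long len = strnlen_user(ubuf, PATH_MAX);
+ *
+ *	if (len == 0)			a fault occurred
+ *	else if (len > PATH_MAX)	string was not terminated in time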
+ */ +long strnlen_user(const char *s, long n) +{ + unsigned long res, tmp; + + if (s == NULL) + return 0; + + asm volatile( + " cmpnei %1, 0 \n" + " bf 3f \n" + "1: cmpnei %0, 0 \n" + " bf 3f \n" + "2: ldb %3, (%1, 0) \n" + " cmpnei %3, 0 \n" + " bf 3f \n" + " subi %0, 1 \n" + " addi %1, 1 \n" + " br 1b \n" + "3: subu %2, %0 \n" + " addi %2, 1 \n" + " br 5f \n" + "4: movi %0, 0 \n" + " br 5f \n" + ".section __ex_table, \"a\" \n" + ".align 2 \n" + ".long 2b, 4b \n" + ".previous \n" + "5: \n" + : "=r"(n), "=r"(s), "=r"(res), "=r"(tmp) + : "0"(n), "1"(s), "2"(n) + : "memory", "cc"); + + return res; +} +EXPORT_SYMBOL(strnlen_user); + +#define __do_clear_user(addr, size) \ +do { \ + int __d0, zvalue, tmp; \ + \ + asm volatile( \ + "0: cmpnei %1, 0 \n" \ + " bf 7f \n" \ + " mov %3, %1 \n" \ + " andi %3, 3 \n" \ + " cmpnei %3, 0 \n" \ + " bf 1f \n" \ + " br 5f \n" \ + "1: cmplti %0, 32 \n" /* 4W */ \ + " bt 3f \n" \ + "8: stw %2, (%1, 0) \n" \ + "10: stw %2, (%1, 4) \n" \ + "11: stw %2, (%1, 8) \n" \ + "12: stw %2, (%1, 12) \n" \ + "13: stw %2, (%1, 16) \n" \ + "14: stw %2, (%1, 20) \n" \ + "15: stw %2, (%1, 24) \n" \ + "16: stw %2, (%1, 28) \n" \ + " addi %1, 32 \n" \ + " subi %0, 32 \n" \ + " br 1b \n" \ + "3: cmplti %0, 4 \n" /* 1W */ \ + " bt 5f \n" \ + "4: stw %2, (%1, 0) \n" \ + " addi %1, 4 \n" \ + " subi %0, 4 \n" \ + " br 3b \n" \ + "5: cmpnei %0, 0 \n" /* 1B */ \ + "9: bf 7f \n" \ + "6: stb %2, (%1, 0) \n" \ + " addi %1, 1 \n" \ + " subi %0, 1 \n" \ + " br 5b \n" \ + ".section __ex_table,\"a\" \n" \ + ".align 2 \n" \ + ".long 8b, 9b \n" \ + ".long 10b, 9b \n" \ + ".long 11b, 9b \n" \ + ".long 12b, 9b \n" \ + ".long 13b, 9b \n" \ + ".long 14b, 9b \n" \ + ".long 15b, 9b \n" \ + ".long 16b, 9b \n" \ + ".long 4b, 9b \n" \ + ".long 6b, 9b \n" \ + ".previous \n" \ + "7: \n" \ + : "=r"(size), "=r" (__d0), \ + "=r"(zvalue), "=r"(tmp) \ + : "0"(size), "1"(addr), "2"(0) \ + : "memory", "cc"); \ +} while (0) + +/* + * clear_user: - Zero a block of memory in user space. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. + */ +unsigned long +clear_user(void __user *to, unsigned long n) +{ + if (access_ok(to, n)) + __do_clear_user(to, n); + return n; +} +EXPORT_SYMBOL(clear_user); + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. 
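+ *
+ * Typical call site (illustrative):
+ *
+ *	if (!access_ok(ubuf, size) || __clear_user(ubuf, size))
+ *		return -EFAULT;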
+ */ +unsigned long +__clear_user(void __user *to, unsigned long n) +{ + __do_clear_user(to, n); + return n; +} +EXPORT_SYMBOL(__clear_user); diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile new file mode 100644 index 000000000..6e7696e55 --- /dev/null +++ b/arch/csky/mm/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) +obj-y += cachev2.o +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) +else +obj-y += cachev1.o +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) +endif + +obj-y += dma-mapping.o +obj-y += fault.o +obj-$(CONFIG_HIGHMEM) += highmem.o +obj-y += init.o +obj-y += ioremap.o +obj-y += syscache.o +obj-y += tlb.o +obj-y += asid.o +obj-y += context.o +obj-$(CONFIG_HAVE_TCM) += tcm.o diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c new file mode 100644 index 000000000..b2e914745 --- /dev/null +++ b/arch/csky/mm/asid.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic ASID allocator. + * + * Based on arch/arm/mm/context.c + * + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/slab.h> +#include <linux/mm_types.h> + +#include <asm/asid.h> + +#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu) + +#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0)) +#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits)) + +#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift) +#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info)) + +static void flush_context(struct asid_info *info) +{ + int i; + u64 asid; + + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info)); + + for_each_possible_cpu(i) { + asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. + */ + if (asid == 0) + asid = reserved_asid(info, i); + __set_bit(asid2idx(info, asid), info->map); + reserved_asid(info, i) = asid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&info->flush_pending); +} + +static bool check_update_reserved_asid(struct asid_info *info, u64 asid, + u64 newasid) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (reserved_asid(info, cpu) == asid) { + hit = true; + reserved_asid(info, cpu) = newasid; + } + } + + return hit; +} + +static u64 new_context(struct asid_info *info, atomic64_t *pasid, + struct mm_struct *mm) +{ + static u32 cur_idx = 1; + u64 asid = atomic64_read(pasid); + u64 generation = atomic64_read(&info->generation); + + if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK(info)); + + /* + * If our current ASID was active during a rollover, we + * can continue to use it and this was just a false alarm. 
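+	 *
+	 * Numbers for illustration, with info->bits == 12: the
+	 * generation lives above bit 11 and the hardware ASID in bits
+	 * [11:0], so an old value 0x1005 seen under generation 0x2000
+	 * becomes newasid == 0x2005: same index, current generation.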
+ */ + if (check_update_reserved_asid(info, asid, newasid)) + return newasid; + + /* + * We had a valid ASID in a previous life, so try to re-use + * it if possible. + */ + if (!__test_and_set_bit(asid2idx(info, asid), info->map)) + return newasid; + } + + /* + * Allocate a free ASID. If we can't find one, take a note of the + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. + */ + asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx); + if (asid != NUM_CTXT_ASIDS(info)) + goto set_asid; + + /* We're out of ASIDs, so increment the global generation count */ + generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info), + &info->generation); + flush_context(info); + + /* We have more ASIDs than CPUs, so this will always succeed */ + asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1); + +set_asid: + __set_bit(asid, info->map); + cur_idx = asid; + cpumask_clear(mm_cpumask(mm)); + return idx2asid(info, asid) | generation; +} + +/* + * Generate a new ASID for the context. + * + * @pasid: Pointer to the current ASID batch allocated. It will be updated + * with the new ASID batch. + * @cpu: current CPU ID. Must have been acquired through get_cpu() + */ +void asid_new_context(struct asid_info *info, atomic64_t *pasid, + unsigned int cpu, struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + raw_spin_lock_irqsave(&info->lock, flags); + /* Check that our ASID belongs to the current generation. */ + asid = atomic64_read(pasid); + if ((asid ^ atomic64_read(&info->generation)) >> info->bits) { + asid = new_context(info, pasid, mm); + atomic64_set(pasid, asid); + } + + if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending)) + info->flush_cpu_ctxt_cb(); + + atomic64_set(&active_asid(info, cpu), asid); + cpumask_set_cpu(cpu, mm_cpumask(mm)); + raw_spin_unlock_irqrestore(&info->lock, flags); +} + +/* + * Initialize the ASID allocator + * + * @info: Pointer to the asid allocator structure + * @bits: Number of ASIDs available + * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are + * allocated contiguously for a given context. This value should be a power of + * 2. + */ +int asid_allocator_init(struct asid_info *info, + u32 bits, unsigned int asid_per_ctxt, + void (*flush_cpu_ctxt_cb)(void)) +{ + info->bits = bits; + info->ctxt_shift = ilog2(asid_per_ctxt); + info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb; + /* + * Expect allocation after rollover to fail if we don't have at least + * one more ASID than CPUs. ASID #0 is always reserved. + */ + WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus()); + atomic64_set(&info->generation, ASID_FIRST_VERSION(info)); + info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)), + sizeof(*info->map), GFP_KERNEL); + if (!info->map) + return -ENOMEM; + + raw_spin_lock_init(&info->lock); + + return 0; +} diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c new file mode 100644 index 000000000..5a5a9804a --- /dev/null +++ b/arch/csky/mm/cachev1.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
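+
+/*
+ * cachev1 has no per-line cache instructions: every operation is a
+ * pair of control-register writes, e.g. (illustrative)
+ *
+ *	mtcr("cr22", addr);		select the line
+ *	mtcr("cr17", DATA_CACHE | CACHE_CLR | CACHE_OMS);
+ *
+ * which is why cache_op_range() serializes on cache_lock and why
+ * ranges of a page or more fall back to whole-cache operations.
+ */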
+ +#include <linux/spinlock.h> +#include <asm/cache.h> +#include <abi/reg_ops.h> + +/* for L1-cache */ +#define INS_CACHE (1 << 0) +#define DATA_CACHE (1 << 1) +#define CACHE_INV (1 << 4) +#define CACHE_CLR (1 << 5) +#define CACHE_OMS (1 << 6) +#define CACHE_ITS (1 << 7) +#define CACHE_LICF (1 << 31) + +/* for L2-cache */ +#define CR22_LEVEL_SHIFT (1) +#define CR22_SET_SHIFT (7) +#define CR22_WAY_SHIFT (30) +#define CR22_WAY_SHIFT_L2 (29) + +static DEFINE_SPINLOCK(cache_lock); + +static inline void cache_op_line(unsigned long i, unsigned int val) +{ + mtcr("cr22", i); + mtcr("cr17", val); +} + +#define CCR2_L2E (1 << 3) +static void cache_op_all(unsigned int value, unsigned int l2) +{ + mtcr("cr17", value | CACHE_CLR); + mb(); + + if (l2 && (mfcr_ccr2() & CCR2_L2E)) { + mtcr("cr24", value | CACHE_CLR); + mb(); + } +} + +static void cache_op_range( + unsigned int start, + unsigned int end, + unsigned int value, + unsigned int l2) +{ + unsigned long i, flags; + unsigned int val = value | CACHE_CLR | CACHE_OMS; + bool l2_sync; + + if (unlikely((end - start) >= PAGE_SIZE) || + unlikely(start < PAGE_OFFSET) || + unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { + cache_op_all(value, l2); + return; + } + + if ((mfcr_ccr2() & CCR2_L2E) && l2) + l2_sync = 1; + else + l2_sync = 0; + + spin_lock_irqsave(&cache_lock, flags); + + i = start & ~(L1_CACHE_BYTES - 1); + for (; i < end; i += L1_CACHE_BYTES) { + cache_op_line(i, val); + if (l2_sync) { + mb(); + mtcr("cr24", val); + } + } + spin_unlock_irqrestore(&cache_lock, flags); + + mb(); +} + +void dcache_wb_line(unsigned long start) +{ + asm volatile("idly4\n":::"memory"); + cache_op_line(start, DATA_CACHE|CACHE_CLR); + mb(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); +} + +void icache_inv_all(void) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + +void local_icache_inv_all(void *priv) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + +void dcache_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); +} + +void dcache_wbinv_all(void) +{ + cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void cache_wbinv_all(void) +{ + cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); +} + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} + +void dma_inv_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} + +void dma_wb_range(unsigned long start, unsigned long end) +{ + cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); +} diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c new file mode 100644 index 000000000..7a9664adc --- /dev/null +++ b/arch/csky/mm/cachev2.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
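+
+/*
+ * Unlike cachev1, cachev2 cores take the address as an instruction
+ * operand, e.g. "dcache.cva %0" to clean or "dcache.iva %0" to
+ * invalidate one line, each loop ending in a sync.is barrier.  Only
+ * the !CPU_HAS_ICACHE_INS fallback below still needs the cr22/cr17
+ * register pair and a spinlock.
+ */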
+ +#include <linux/spinlock.h> +#include <linux/smp.h> +#include <linux/mm.h> +#include <asm/cache.h> +#include <asm/barrier.h> + +/* for L1-cache */ +#define INS_CACHE (1 << 0) +#define DATA_CACHE (1 << 1) +#define CACHE_INV (1 << 4) +#define CACHE_CLR (1 << 5) +#define CACHE_OMS (1 << 6) + +void local_icache_inv_all(void *priv) +{ + mtcr("cr17", INS_CACHE|CACHE_INV); + sync_is(); +} + +#ifdef CONFIG_CPU_HAS_ICACHE_INS +void icache_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("icache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} +#else +struct cache_range { + unsigned long start; + unsigned long end; +}; + +static DEFINE_SPINLOCK(cache_lock); + +static inline void cache_op_line(unsigned long i, unsigned int val) +{ + mtcr("cr22", i); + mtcr("cr17", val); +} + +void local_icache_inv_range(void *priv) +{ + struct cache_range *param = priv; + unsigned long i = param->start & ~(L1_CACHE_BYTES - 1); + unsigned long flags; + + spin_lock_irqsave(&cache_lock, flags); + + for (; i < param->end; i += L1_CACHE_BYTES) + cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS); + + spin_unlock_irqrestore(&cache_lock, flags); + + sync_is(); +} + +void icache_inv_range(unsigned long start, unsigned long end) +{ + struct cache_range param = { start, end }; + + if (irqs_disabled()) + local_icache_inv_range(¶m); + else + on_each_cpu(local_icache_inv_range, ¶m, 1); +} +#endif + +inline void dcache_wb_line(unsigned long start) +{ + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); + sync_is(); +} + +void dcache_wb_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + sync_is(); +} + +void cache_wbinv_range(unsigned long start, unsigned long end) +{ + dcache_wb_range(start, end); + icache_inv_range(start, end); +} +EXPORT_SYMBOL(cache_wbinv_range); + +void dma_wbinv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + sync_is(); +} + +void dma_inv_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.iva %0\n"::"r"(i):"memory"); + sync_is(); +} + +void dma_wb_range(unsigned long start, unsigned long end) +{ + unsigned long i = start & ~(L1_CACHE_BYTES - 1); + + for (; i < end; i += L1_CACHE_BYTES) + asm volatile("dcache.cva %0\n"::"r"(i):"memory"); + sync_is(); +} diff --git a/arch/csky/mm/context.c b/arch/csky/mm/context.c new file mode 100644 index 000000000..0d95bdd93 --- /dev/null +++ b/arch/csky/mm/context.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
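+
+/*
+ * Thin glue between the generic allocator in mm/asid.c and the csky
+ * MMU: switch_mm() lands in check_and_switch_context(), which only
+ * takes the slow path (new_context() under asid_info.lock) when the
+ * mm's ASID generation is stale; otherwise the ASID is reused as-is.
+ */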
+ +#include <linux/bitops.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <asm/asid.h> +#include <asm/mmu_context.h> +#include <asm/smp.h> +#include <asm/tlbflush.h> + +static DEFINE_PER_CPU(atomic64_t, active_asids); +static DEFINE_PER_CPU(u64, reserved_asids); + +struct asid_info asid_info; + +void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) +{ + asid_check_context(&asid_info, &mm->context.asid, cpu, mm); +} + +static void asid_flush_cpu_ctxt(void) +{ + local_tlb_invalid_all(); +} + +static int asids_init(void) +{ + BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus()); + + if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1, + asid_flush_cpu_ctxt)) + panic("Unable to initialize ASID allocator for %lu ASIDs\n", + NUM_ASIDS(&asid_info)); + + asid_info.active = &active_asids; + asid_info.reserved = &reserved_asids; + + pr_info("ASID allocator initialised with %lu entries\n", + NUM_CTXT_ASIDS(&asid_info)); + + return 0; +} +early_initcall(asids_init); diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c new file mode 100644 index 000000000..c3a775a7e --- /dev/null +++ b/arch/csky/mm/dma-mapping.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/cache.h> +#include <linux/dma-map-ops.h> +#include <linux/genalloc.h> +#include <linux/highmem.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/types.h> +#include <linux/version.h> +#include <asm/cache.h> + +static inline void cache_op(phys_addr_t paddr, size_t size, + void (*fn)(unsigned long start, unsigned long end)) +{ + struct page *page = phys_to_page(paddr); + void *start = __va(page_to_phys(page)); + unsigned long offset = offset_in_page(paddr); + size_t left = size; + + do { + size_t len = left; + + if (offset + len > PAGE_SIZE) + len = PAGE_SIZE - offset; + + if (PageHighMem(page)) { + start = kmap_atomic(page); + + fn((unsigned long)start + offset, + (unsigned long)start + offset + len); + + kunmap_atomic(start); + } else { + fn((unsigned long)start + offset, + (unsigned long)start + offset + len); + } + offset = 0; + + page++; + start += PAGE_SIZE; + left -= len; + } while (left); +} + +static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end) +{ + memset((void *)start, 0, end - start); + dma_wbinv_range(start, end); +} + +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range); +} + +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + cache_op(paddr, size, dma_wb_range); + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, dma_wbinv_range); + break; + default: + BUG(); + } +} + +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + return; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + cache_op(paddr, size, dma_inv_range); + break; + default: + BUG(); + } +} diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c new file mode 100644 index 000000000..081b178b4 --- /dev/null +++ b/arch/csky/mm/fault.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
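+
+/*
+ * Note on the layout of do_page_fault() below: cores without
+ * CONFIG_CPU_HAS_TLBI fault the vmalloc area in lazily, so kernel
+ * addresses are handled first by copying the pgd/pmd entries from
+ * init_mm, before any vma lookup or handle_mm_fault() happens.
+ */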
+ +#include <linux/signal.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/version.h> +#include <linux/vt_kern.h> +#include <linux/extable.h> +#include <linux/uaccess.h> +#include <linux/perf_event.h> +#include <linux/kprobes.h> + +#include <asm/hardirq.h> +#include <asm/mmu_context.h> +#include <asm/traps.h> +#include <asm/page.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(instruction_pointer(regs)); + if (fixup) { + regs->pc = fixup->nextinsn; + + return 1; + } + + return 0; +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long mmu_meh) +{ + struct vm_area_struct *vma = NULL; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + int si_code; + int fault; + unsigned long address = mmu_meh & PAGE_MASK; + + if (kprobe_page_fault(regs, tsk->thread.trap_no)) + return; + + si_code = SEGV_MAPERR; + +#ifndef CONFIG_CPU_HAS_TLBI + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (unlikely(address >= VMALLOC_START) && + unlikely(address <= VMALLOC_END)) { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + int offset = pgd_index(address); + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + unsigned long pgd_base; + + pgd_base = (unsigned long)__va(get_pgd()); + pgd = (pgd_t *)pgd_base + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) + goto no_context; + set_pgd(pgd, *pgd_k); + + pud = (pud_t *)pgd; + pud_k = (pud_t *)pgd_k; + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + return; + } +#endif + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_atomic() || !mm) + goto bad_area_nosemaphore; + + mmap_read_lock(mm); + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + si_code = SEGV_ACCERR; + + if (write) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (unlikely(!vma_is_accessible(vma))) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. 
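+	 *
+	 * The contract, roughly (illustrative): handle_mm_fault()
+	 * returns VM_FAULT_* bits, where VM_FAULT_OOM routes to the
+	 * OOM killer, VM_FAULT_SIGBUS to do_sigbus, and any other bit
+	 * in VM_FAULT_ERROR is a kernel bug (hence the BUG() below).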
+ */ + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0, + regs); + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + BUG(); + } + mmap_read_unlock(mm); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + mmap_read_unlock(mm); + +bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + tsk->thread.trap_no = trap_no(regs); + force_sig_fault(SIGSEGV, si_code, (void __user *)address); + return; + } + +no_context: + tsk->thread.trap_no = trap_no(regs); + + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + pr_alert("Unable to handle kernel paging request at virtual " + "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc); + die(regs, "Oops"); + +out_of_memory: + tsk->thread.trap_no = trap_no(regs); + + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + pagefault_out_of_memory(); + return; + +do_sigbus: + tsk->thread.trap_no = trap_no(regs); + + mmap_read_unlock(mm); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; + + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); +} diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c new file mode 100644 index 000000000..89c10800a --- /dev/null +++ b/arch/csky/mm/highmem.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/module.h> +#include <linux/highmem.h> +#include <linux/smp.h> +#include <linux/memblock.h> +#include <asm/fixmap.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> + +static pte_t *kmap_pte; + +unsigned long highstart_pfn, highend_pfn; + +void kmap_flush_tlb(unsigned long addr) +{ + flush_tlb_one(addr); +} +EXPORT_SYMBOL(kmap_flush_tlb); + +void *kmap_atomic_high_prot(struct page *page, pgprot_t prot) +{ + unsigned long vaddr; + int idx, type; + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(kmap_pte - idx))); +#endif + set_pte(kmap_pte-idx, mk_pte(page, prot)); + flush_tlb_one((unsigned long)vaddr); + + return (void *)vaddr; +} +EXPORT_SYMBOL(kmap_atomic_high_prot); + +void kunmap_atomic_high(void *kvaddr) +{ + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; + int idx; + + if (vaddr < FIXADDR_START) + return; + +#ifdef CONFIG_DEBUG_HIGHMEM + idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx(); + + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + + pte_clear(&init_mm, vaddr, kmap_pte - idx); + flush_tlb_one(vaddr); +#else + (void) idx; /* to kill a warning */ +#endif + kmap_atomic_idx_pop(); +} +EXPORT_SYMBOL(kunmap_atomic_high); + +/* + * This is the same as kmap_atomic() but can map memory that doesn't + * have a struct page associated with it. 
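+ *
+ * Example (illustrative), copying one page of a PFN-only region:
+ *
+ *	void *va = kmap_atomic_pfn(pfn);
+ *	memcpy(buf, va, PAGE_SIZE);
+ *	kunmap_atomic(va);
+ *
+ * pagefault_disable() is taken here, so no sleeping until unmap.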
+ */ +void *kmap_atomic_pfn(unsigned long pfn) +{ + unsigned long vaddr; + int idx, type; + + pagefault_disable(); + + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL)); + flush_tlb_one(vaddr); + + return (void *) vaddr; +} + +static void __init kmap_pages_init(void) +{ + unsigned long vaddr; + pgd_t *pgd; + pmd_t *pmd; + pud_t *pud; + pte_t *pte; + + vaddr = PKMAP_BASE; + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); + + pgd = swapper_pg_dir + pgd_index(vaddr); + pud = (pud_t *)pgd; + pmd = pmd_offset(pud, vaddr); + pte = pte_offset_kernel(pmd, vaddr); + pkmap_page_table = pte; +} + +void __init kmap_init(void) +{ + unsigned long vaddr; + + kmap_pages_init(); + + vaddr = __fix_to_virt(FIX_KMAP_BEGIN); + + kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); +} diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c new file mode 100644 index 000000000..af6271283 --- /dev/null +++ b/arch/csky/mm/init.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/bug.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/pagemap.h> +#include <linux/ptrace.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/highmem.h> +#include <linux/memblock.h> +#include <linux/swap.h> +#include <linux/proc_fs.h> +#include <linux/pfn.h> +#include <linux/initrd.h> + +#include <asm/setup.h> +#include <asm/cachectl.h> +#include <asm/dma.h> +#include <asm/pgalloc.h> +#include <asm/mmu_context.h> +#include <asm/sections.h> +#include <asm/tlb.h> + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; +pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] + __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); + +#ifdef CONFIG_BLK_DEV_INITRD +static void __init setup_initrd(void) +{ + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_err("initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + + if (memblock_is_region_reserved(__pa(initrd_start), size)) { + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", + __pa(initrd_start), size); + goto disable; + } + + memblock_reserve(__pa(initrd_start), size); + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + + initrd_below_start_ok = 1; + + return; + +disable: + initrd_start = initrd_end = 0; + + pr_err(" - disabling initrd\n"); +} +#endif + +void __init mem_init(void) +{ +#ifdef CONFIG_HIGHMEM + unsigned long tmp; + + max_mapnr = highend_pfn; +#else + max_mapnr = max_low_pfn; +#endif + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + +#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif + + memblock_free_all(); + +#ifdef CONFIG_HIGHMEM + for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { + struct page *page = pfn_to_page(tmp); + + /* FIXME not sure about */ + if (!memblock_is_reserved(tmp << PAGE_SHIFT)) + free_highmem_page(page); + } +#endif + mem_init_print_info(NULL); +} + +extern char __init_begin[], __init_end[]; + 
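+
+/*
+ * Hand the [__init_begin, __init_end) range (init text and data, see
+ * vmlinux.lds.S) back to the page allocator page by page: clear
+ * PG_reserved, reset the refcount, free_page(), bump totalram_pages.
+ */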
+void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long) &__init_begin; + + while (addr < (unsigned long) &__init_end) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + free_page(addr); + totalram_pages_inc(); + addr += PAGE_SIZE; + } + + pr_info("Freeing unused kernel memory: %dk freed\n", + ((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10); +} + +void pgd_init(unsigned long *p) +{ + int i; + + for (i = 0; i < PTRS_PER_PGD; i++) + p[i] = __pa(invalid_pte_table); +} + +void __init pre_mmu_init(void) +{ + /* + * Setup page-table and enable TLB-hardrefill + */ + flush_tlb_all(); + pgd_init((unsigned long *)swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + + /* Setup page mask to 4k */ + write_mmu_pagemask(0); +} + +void __init fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int i, j, k; + unsigned long vaddr; + + vaddr = start; + i = pgd_index(vaddr); + j = pud_index(vaddr); + k = pmd_index(vaddr); + pgd = pgd_base + i; + + for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + pud = (pud_t *)pgd; + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + pmd = (pmd_t *)pud; + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + if (pmd_none(*pmd)) { + pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, + PAGE_SIZE); + + set_pmd(pmd, __pmd(__pa(pte))); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); + } + vaddr += PMD_SIZE; + } + k = 0; + } + j = 0; + } +} + +void __init fixaddr_init(void) +{ + unsigned long vaddr; + + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); +} diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c new file mode 100644 index 000000000..70c8268d3 --- /dev/null +++ b/arch/csky/mm/ioremap.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/io.h> + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) { + return pgprot_noncached(vma_prot); + } else if (file->f_flags & O_SYNC) { + return pgprot_writecombine(vma_prot); + } + + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c new file mode 100644 index 000000000..cd847ad62 --- /dev/null +++ b/arch/csky/mm/syscache.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
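+
+/*
+ * Userspace entry point for explicit cache maintenance, e.g. after
+ * writing JIT code (illustrative):
+ *
+ *	syscall(__NR_cacheflush, buf, len, BCACHE);
+ *
+ * BCACHE writes the D-cache back and then invalidates the I-cache
+ * for the range; DCACHE and ICACHE each do only their own half.
+ */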
+ +#include <linux/syscalls.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/cachectl.h> + +SYSCALL_DEFINE3(cacheflush, + void __user *, addr, + unsigned long, bytes, + int, cache) +{ + switch (cache) { + case BCACHE: + case DCACHE: + dcache_wb_range((unsigned long)addr, + (unsigned long)addr + bytes); + if (cache != BCACHE) + break; + fallthrough; + case ICACHE: + flush_icache_mm_range(current->mm, + (unsigned long)addr, + (unsigned long)addr + bytes); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c new file mode 100644 index 000000000..ddeb36328 --- /dev/null +++ b/arch/csky/mm/tcm.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/highmem.h> +#include <linux/genalloc.h> +#include <asm/tlbflush.h> +#include <asm/fixmap.h> + +#if (CONFIG_ITCM_RAM_BASE == 0xffffffff) +#error "You should define ITCM_RAM_BASE" +#endif + +#ifdef CONFIG_HAVE_DTCM +#if (CONFIG_DTCM_RAM_BASE == 0xffffffff) +#error "You should define DTCM_RAM_BASE" +#endif + +#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE) +#error "You should define correct DTCM_RAM_BASE" +#endif +#endif + +extern char __tcm_start, __tcm_end, __dtcm_start; + +static struct gen_pool *tcm_pool; + +static void __init tcm_mapping_init(void) +{ + pte_t *tcm_pte; + unsigned long vaddr, paddr; + int i; + + paddr = CONFIG_ITCM_RAM_BASE; + + if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE))) + goto panic; + +#ifndef CONFIG_HAVE_DTCM + for (i = 0; i < TCM_NR_PAGES; i++) { +#else + for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) { +#endif + vaddr = __fix_to_virt(FIX_TCM - i); + + tcm_pte = + pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } + +#ifdef CONFIG_HAVE_DTCM + if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE))) + goto panic; + + paddr = CONFIG_DTCM_RAM_BASE; + + for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) { + vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i); + + tcm_pte = + pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } +#endif + +#ifndef CONFIG_HAVE_DTCM + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__tcm_end - &__tcm_start); + + pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __tcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start); +#else + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__dtcm_start - &__tcm_start); + + pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __itcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start); + + memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + &__dtcm_start, &__tcm_end - &__dtcm_start); + + pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + CONFIG_DTCM_RAM_BASE); + + pr_info("%s: __dtcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start); + +#endif + return; +panic: + panic("TCM init error"); +} + +void *tcm_alloc(size_t len) +{ + unsigned long vaddr; + + if (!tcm_pool) + return NULL; + + vaddr = gen_pool_alloc(tcm_pool, len); + if (!vaddr) + 
return NULL;
+
+ return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+void tcm_free(void *addr, size_t len)
+{
+ gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+static int __init tcm_setup_pool(void)
+{
+#ifndef CONFIG_HAVE_DTCM
+ u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__tcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+ + (u32) (&__tcm_end - &__tcm_start);
+#else
+ u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__dtcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+ + (u32) (&__tcm_end - &__dtcm_start);
+#endif
+ int ret;
+
+ tcm_pool = gen_pool_create(2, -1);
+
+ ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
+ if (ret) {
+ pr_err("%s: gen_pool add failed!\n", __func__);
+ return ret;
+ }
+
+ pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
+ __func__, pool_size, tcm_pool_start);
+
+ return 0;
+}
+
+static int __init tcm_init(void)
+{
+ tcm_mapping_init();
+
+ tcm_setup_pool();
+
+ return 0;
+}
+arch_initcall(tcm_init);
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
new file mode 100644
index 000000000..ed1512381
--- /dev/null
+++ b/arch/csky/mm/tlb.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+
+/*
+ * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.
+ * 1 VPN -> 2 PFN.
+ */
+#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)
+
+void flush_tlb_all(void)
+{
+ tlb_invalid_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+#else
+ tlb_invalid_all();
+#endif
+}
+
+/*
+ * The MMU operation registers can only invalidate TLB entries in the
+ * jTLB, so we must change the ASID field to also invalidate the
+ * I-uTLB and D-uTLB.
+ */
+#ifndef CONFIG_CPU_HAS_TLBI
+#define restore_asid_inv_utlb(oldpid, newpid) \
+do { \
+ if (oldpid == newpid) \
+ write_mmu_entryhi(oldpid + 1); \
+ write_mmu_entryhi(oldpid); \
+} while (0)
+#endif
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long newpid = cpu_asid(vma->vm_mm);
+
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ while (start < end) {
+ asm volatile("tlbi.vas %0"::"r"(start | newpid));
+ start += 2*PAGE_SIZE;
+ }
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | newpid);
+ start += 2*PAGE_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ start &= TLB_ENTRY_SIZE_MASK;
+ end += TLB_ENTRY_SIZE - 1;
+ end &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ while (start < end) {
+ asm volatile("tlbi.vaas %0"::"r"(start));
+ start += 2*PAGE_SIZE;
+ }
+ sync_is();
+#else
+ {
+ unsigned long flags, oldpid;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ while (start < end) {
+ int idx;
+
+ write_mmu_entryhi(start | oldpid);
+ start += 2*PAGE_SIZE;
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+ }
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ int newpid = cpu_asid(vma->vm_mm);
+
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.vas %0"::"r"(addr | newpid));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | newpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, newpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+void flush_tlb_one(unsigned long addr)
+{
+ addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+ asm volatile("tlbi.vaas %0"::"r"(addr));
+ sync_is();
+#else
+ {
+ int oldpid, idx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ oldpid = read_mmu_entryhi() & ASID_MASK;
+ write_mmu_entryhi(addr | oldpid);
+ tlb_probe();
+ idx = read_mmu_index();
+ if (idx >= 0)
+ tlb_invalid_indexed();
+
+ restore_asid_inv_utlb(oldpid, oldpid);
+ local_irq_restore(flags);
+ }
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_one);
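Editor's note: a minimal userspace sketch (not part of the patch) showing how the cacheflush syscall from arch/csky/mm/syscache.c above would be used, e.g. by a JIT after emitting instructions. It assumes a csky toolchain where <asm/cachectl.h> defines ICACHE/DCACHE/BCACHE and <sys/syscall.h> exposes __NR_cacheflush; error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

static long cacheflush(void *addr, unsigned long bytes, int cache)
{
	return syscall(__NR_cacheflush, addr, bytes, cache);
}

int main(void)
{
	static unsigned char code[64];

	memset(code, 0, sizeof(code));	/* stand-in for emitting real instructions */

	/* BCACHE: write the D-cache back, then invalidate the I-cache range */
	if (cacheflush(code, sizeof(code), BCACHE) != 0) {
		perror("cacheflush");
		return 1;
	}
	return 0;
}

BCACHE is the right selector after code generation: the switch in syscache.c performs the D-cache writeback and then falls through to the I-cache invalidate, which matches the two steps a JIT needs.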