Diffstat
34 files changed, 5709 insertions, 0 deletions
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile new file mode 100644 index 000000000..37f37c0e9 --- /dev/null +++ b/arch/csky/kernel/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +extra-y := head.o vmlinux.lds + +obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o +obj-y += power.o syscall.o syscall_table.o setup.o +obj-y += process.o cpu-probe.o ptrace.o stacktrace.o +obj-y += probes/ + +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o +obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o +obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +endif diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c new file mode 100644 index 000000000..17479860d --- /dev/null +++ b/arch/csky/kernel/asm-offsets.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/kbuild.h> +#include <abi/regdef.h> + +int main(void) +{ + /* offsets into the task struct */ + DEFINE(TASK_STATE, offsetof(struct task_struct, state)); + DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); + DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); + DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); + DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); + DEFINE(TASK_MM, offsetof(struct task_struct, mm)); + DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); + + /* offsets into the thread struct */ + DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp)); + DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); + DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); + DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); + + /* offsets into the thread_info struct */ + DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); + DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); + + /* offsets into the pt_regs */ + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); + DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + + DEFINE(PT_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_A3, offsetof(struct pt_regs, a3)); + DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_R15, offsetof(struct pt_regs, lr)); +#if defined(__CSKYABIV2__) + DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0])); + DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1])); + DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2])); + DEFINE(PT_R19, 
offsetof(struct pt_regs, exregs[3]));
+    DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4]));
+    DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5]));
+    DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6]));
+    DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7]));
+    DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8]));
+    DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9]));
+    DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10]));
+    DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11]));
+    DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12]));
+    DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13]));
+    DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14]));
+    DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15]));
+    DEFINE(PT_RHI, offsetof(struct pt_regs, rhi));
+    DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
+#endif
+    DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+    DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
+
+    /* offsets into the irq_cpustat_t struct */
+    DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
+                        __softirq_pending));
+
+    /* signal defines */
+    DEFINE(SIGSEGV, SIGSEGV);
+    DEFINE(SIGTRAP, SIGTRAP);
+
+    return 0;
+}
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
new file mode 100644
index 000000000..3821ef9b7
--- /dev/null
+++ b/arch/csky/kernel/atomic.S
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+
+.text
+
+/*
+ * int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * If *ptr != oldval, return 1;
+ * else set *ptr = newval and return 0.
+ */
+ENTRY(csky_cmpxchg)
+    USPTOKSP
+    mfcr    a3, epc
+    addi    a3, TRAP0_SIZE
+
+    subi    sp, 16
+    stw     a3, (sp, 0)
+    mfcr    a3, epsr
+    stw     a3, (sp, 4)
+    mfcr    a3, usp
+    stw     a3, (sp, 8)
+
+    psrset  ee
+#ifdef CONFIG_CPU_HAS_LDSTEX
+1:
+    ldex    a3, (a2)
+    cmpne   a0, a3
+    bt16    2f
+    mov     a3, a1
+    stex    a3, (a2)
+    bez     a3, 1b
+2:
+    sync.is
+#else
+1:
+    ldw     a3, (a2)
+    cmpne   a0, a3
+    bt16    3f
+2:
+    stw     a1, (a2)
+3:
+#endif
+    mvc     a0
+    ldw     a3, (sp, 0)
+    mtcr    a3, epc
+    ldw     a3, (sp, 4)
+    mtcr    a3, epsr
+    ldw     a3, (sp, 8)
+    mtcr    a3, usp
+    addi    sp, 16
+    KSPTOUSP
+    rte
+END(csky_cmpxchg)
+
+#ifndef CONFIG_CPU_HAS_LDSTEX
+/*
+ * Called from the tlbmodified exception
+ */
+ENTRY(csky_cmpxchg_fixup)
+    mfcr    a0, epc
+    lrw     a1, 2b
+    cmpne   a1, a0
+    bt      1f
+    subi    a1, (2b - 1b)
+    stw     a1, (sp, LSAVE_PC)
+1:
+    rts
+END(csky_cmpxchg_fixup)
+#endif
diff --git a/arch/csky/kernel/cpu-probe.c b/arch/csky/kernel/cpu-probe.c
new file mode 100644
index 000000000..5f15ca31d
--- /dev/null
+++ b/arch/csky/kernel/cpu-probe.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
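The cpu-probe.c body that follows backs /proc/cpuinfo with a one-shot seq_file iterator: c_start() hands out a single opaque token, c_next() immediately ends the walk, and c_show() pushes percpu_print() to every online CPU via smp_call_function_single(), so each CPU prints its own ID and feature registers. As a rough sketch (simplified; not the actual fs/seq_file.c loop), the seq_file core drives these ops like this:

    #include <linux/seq_file.h>

    /* Simplified rendition of how seq_file consumes cpuinfo_op on a
     * read of /proc/cpuinfo; the real loop also manages the output
     * buffer, file offsets and error codes.
     */
    static int seq_walk(struct seq_file *m, const struct seq_operations *op)
    {
            loff_t pos = 0;
            void *v = op->start(m, &pos);

            while (v) {
                    if (op->show(m, v))        /* fills m->buf via seq_printf() */
                            break;
                    pos++;
                    v = op->next(m, v, &pos);  /* returns NULL here: one record */
            }
            op->stop(m, v);
            return 0;
    }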
+ +#include <linux/of.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/memblock.h> + +#include <abi/reg_ops.h> + +static void percpu_print(void *arg) +{ + struct seq_file *m = (struct seq_file *)arg; + unsigned int cur, next, i; + + seq_printf(m, "processor : %d\n", smp_processor_id()); + seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME); + + /* read processor id, max is 100 */ + cur = mfcr("cr13"); + for (i = 0; i < 100; i++) { + seq_printf(m, "product info[%d] : 0x%08x\n", i, cur); + + next = mfcr("cr13"); + + /* some CPU only has one id reg */ + if (cur == next) + break; + + cur = next; + + /* cpid index is 31-28, reset */ + if (!(next >> 28)) { + while ((mfcr("cr13") >> 28) != i); + break; + } + } + + /* CPU feature regs, setup by bootloader or gdbinit */ + seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint()); + seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18")); + seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2()); + seq_printf(m, "\n"); +} + +static int c_show(struct seq_file *m, void *v) +{ + int cpu; + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, percpu_print, m, true); + +#ifdef CSKY_ARCH_VERSION + seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION); + seq_printf(m, "\n"); +#endif + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) {} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show, +}; diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S new file mode 100644 index 000000000..5a5cabd07 --- /dev/null +++ b/arch/csky/kernel/entry.S @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
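The entry.S hunk that follows implements the TLB refill/modify handlers. tlbop_begin performs a software two-level walk: the faulting address comes from MEH, the level-1 slot is va >> _PGDIR_SHIFT (22), and the level-2 slot is computed with PTE_INDX_SHIFT/PTE_INDX_MSK so the result is already a byte offset into the pte table. A rough C rendition of just the walk (error paths, the kernel-pgd case for high addresses, and the MMU write-back are omitted; va_pa_offset is the symbol the assembly itself loads):

    extern unsigned long va_pa_offset;

    /* (va >> 10) & 0xffc == ((va >> 12) & 0x3ff) * 4, i.e. the pte
     * index arrives pre-scaled to a byte offset.
     */
    static unsigned long lookup_pte(unsigned long pgd_pa, unsigned long va)
    {
            unsigned long *tbl;

            /* phys -> kernel virtual, as the asm does */
            tbl = (unsigned long *)((pgd_pa - va_pa_offset) | (1UL << 31));
            /* level 1: pgd slot */
            tbl = (unsigned long *)((tbl[va >> 22] - va_pa_offset) |
                                    (1UL << 31));
            /* level 2: pte slot as a byte offset */
            return *(unsigned long *)((unsigned long)tbl +
                                      ((va >> 10) & 0xffc));
    }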
+ +#include <linux/linkage.h> +#include <abi/entry.h> +#include <abi/pgtable-bits.h> +#include <asm/errno.h> +#include <asm/setup.h> +#include <asm/unistd.h> +#include <asm/asm-offsets.h> +#include <linux/threads.h> +#include <asm/setup.h> +#include <asm/page.h> +#include <asm/thread_info.h> + +#define PTE_INDX_MSK 0xffc +#define PTE_INDX_SHIFT 10 +#define _PGDIR_SHIFT 22 + +.macro zero_fp +#ifdef CONFIG_STACKTRACE + movi r8, 0 +#endif +.endm + +.macro context_tracking +#ifdef CONFIG_CONTEXT_TRACKING + mfcr a0, epsr + btsti a0, 31 + bt 1f + jbsr context_tracking_user_exit + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV1__) + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) +#endif +1: +#endif +.endm + +.macro tlbop_begin name, val0, val1, val2 +ENTRY(csky_\name) + mtcr a3, ss2 + mtcr r6, ss3 + mtcr a2, ss4 + + RD_PGDR r6 + RD_MEH a3 +#ifdef CONFIG_CPU_HAS_TLBI + tlbi.vaas a3 + sync.is + + btsti a3, 31 + bf 1f + RD_PGDR_K r6 +1: +#else + bgeni a2, 31 + WR_MCIR a2 + bgeni a2, 25 + WR_MCIR a2 +#endif + bclri r6, 0 + lrw a2, va_pa_offset + ld.w a2, (a2, 0) + subu r6, a2 + bseti r6, 31 + + mov a2, a3 + lsri a2, _PGDIR_SHIFT + lsli a2, 2 + addu r6, a2 + ldw r6, (r6) + + lrw a2, va_pa_offset + ld.w a2, (a2, 0) + subu r6, a2 + bseti r6, 31 + + lsri a3, PTE_INDX_SHIFT + lrw a2, PTE_INDX_MSK + and a3, a2 + addu r6, a3 + ldw a3, (r6) + + movi a2, (_PAGE_PRESENT | \val0) + and a3, a2 + cmpne a3, a2 + bt \name + + /* First read/write the page, just update the flags */ + ldw a3, (r6) + bgeni a2, PAGE_VALID_BIT + bseti a2, PAGE_ACCESSED_BIT + bseti a2, \val1 + bseti a2, \val2 + or a3, a2 + stw a3, (r6) + + /* Some cpu tlb-hardrefill bypass the cache */ +#ifdef CONFIG_CPU_NEED_TLBSYNC + movi a2, 0x22 + bseti a2, 6 + mtcr r6, cr22 + mtcr a2, cr17 + sync +#endif + + mfcr a3, ss2 + mfcr r6, ss3 + mfcr a2, ss4 + rte +\name: + mfcr a3, ss2 + mfcr r6, ss3 + mfcr a2, ss4 + SAVE_ALL 0 +.endm +.macro tlbop_end is_write + zero_fp + context_tracking + RD_MEH a2 + psrset ee, ie + mov a0, sp + movi a1, \is_write + jbsr do_page_fault + jmpi ret_from_exception +.endm + +.text + +tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT +tlbop_end 0 + +tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT +tlbop_end 1 + +tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT +#ifndef CONFIG_CPU_HAS_LDSTEX +jbsr csky_cmpxchg_fixup +#endif +tlbop_end 1 + +ENTRY(csky_systemcall) + SAVE_ALL TRAP0_SIZE + zero_fp + context_tracking + psrset ee, ie + + lrw r9, __NR_syscalls + cmphs syscallid, r9 /* Check nr of syscall */ + bt 1f + + lrw r9, sys_call_table + ixw r9, syscallid + ldw syscallid, (r9) + cmpnei syscallid, 0 + bf ret_from_exception + + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bt csky_syscall_trace +#if defined(__CSKYABIV2__) + subi sp, 8 + stw r5, (sp, 0x4) + stw r4, (sp, 0x0) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + jsr syscallid +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ +1: +#ifdef CONFIG_DEBUG_RSEQ + mov a0, sp + jbsr rseq_syscall +#endif + jmpi ret_from_exception + +csky_syscall_trace: + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_enter + cmpnei a0, 0 + bt 1f + /* Prepare args before do system call */ + ldw a0, (sp, LSAVE_A0) + ldw a1, (sp, LSAVE_A1) + ldw a2, (sp, LSAVE_A2) + ldw a3, (sp, LSAVE_A3) +#if defined(__CSKYABIV2__) + subi sp, 8 + ldw 
r9, (sp, LSAVE_A4) + stw r9, (sp, 0x0) + ldw r9, (sp, LSAVE_A5) + stw r9, (sp, 0x4) + jsr syscallid /* Do system call */ + addi sp, 8 +#else + ldw r6, (sp, LSAVE_A4) + ldw r7, (sp, LSAVE_A5) + jsr syscallid /* Do system call */ +#endif + stw a0, (sp, LSAVE_A0) /* Save return value */ + +1: +#ifdef CONFIG_DEBUG_RSEQ + mov a0, sp + jbsr rseq_syscall +#endif + mov a0, sp /* right now, sp --> pt_regs */ + jbsr syscall_trace_exit + br ret_from_exception + +ENTRY(ret_from_kernel_thread) + jbsr schedule_tail + mov a0, r10 + jsr r9 + jbsr ret_from_exception + +ENTRY(ret_from_fork) + jbsr schedule_tail + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_SYSCALL_WORK + and r10, r9 + cmpnei r10, 0 + bf ret_from_exception + mov a0, sp /* sp = pt_regs pointer */ + jbsr syscall_trace_exit + +ret_from_exception: + psrclr ie + ld r9, (sp, LSAVE_PSR) + btsti r9, 31 + + bt 1f + /* + * Load address of current->thread_info, Then get address of task_struct + * Get task_needreshed in task_struct + */ + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, TINFO_FLAGS) + lrw r9, _TIF_WORK_MASK + and r10, r9 + cmpnei r10, 0 + bt exit_work +#ifdef CONFIG_CONTEXT_TRACKING + jbsr context_tracking_user_enter +#endif +1: +#ifdef CONFIG_PREEMPTION + mov r9, sp + bmaski r10, THREAD_SHIFT + andn r9, r10 + + ldw r10, (r9, TINFO_PREEMPT) + cmpnei r10, 0 + bt 2f + jbsr preempt_schedule_irq /* irq en/disable is done inside */ +2: +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS + ld r10, (sp, LSAVE_PSR) + btsti r10, 6 + bf 2f + jbsr trace_hardirqs_on +2: +#endif + RESTORE_ALL + +exit_work: + lrw r9, ret_from_exception + mov lr, r9 + + btsti r10, TIF_NEED_RESCHED + bt work_resched + + psrset ie + mov a0, sp + mov a1, r10 + jmpi do_notify_resume + +work_resched: + jmpi schedule + +ENTRY(csky_trap) + SAVE_ALL 0 + zero_fp + context_tracking + psrset ee + mov a0, sp /* Push Stack pointer arg */ + jbsr trap_c /* Call C-level trap handler */ + jmpi ret_from_exception + +/* + * Prototype from libc for abiv1: + * register unsigned int __result asm("a0"); + * asm( "trap 3" :"=r"(__result)::); + */ +ENTRY(csky_get_tls) + USPTOKSP + + /* increase epc for continue */ + mfcr a0, epc + addi a0, TRAP0_SIZE + mtcr a0, epc + + /* get current task thread_info with kernel 8K stack */ + bmaski a0, THREAD_SHIFT + not a0 + subi sp, 1 + and a0, sp + addi sp, 1 + + /* get tls */ + ldw a0, (a0, TINFO_TP_VALUE) + + KSPTOUSP + rte + +ENTRY(csky_irq) + SAVE_ALL 0 + zero_fp + context_tracking + psrset ee + +#ifdef CONFIG_TRACE_IRQFLAGS + jbsr trace_hardirqs_off +#endif + + + mov a0, sp + jbsr csky_do_IRQ + + jmpi ret_from_exception + +/* + * a0 = prev task_struct * + * a1 = next task_struct * + * a0 = return next + */ +ENTRY(__switch_to) + lrw a3, TASK_THREAD + addu a3, a0 + + SAVE_SWITCH_STACK + + stw sp, (a3, THREAD_KSP) + + /* Set up next process to run */ + lrw a3, TASK_THREAD + addu a3, a1 + + ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */ + +#if defined(__CSKYABIV2__) + addi a3, a1, TASK_THREAD_INFO + ldw tls, (a3, TINFO_TP_VALUE) +#endif + + RESTORE_SWITCH_STACK + + rts +ENDPROC(__switch_to) diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c new file mode 100644 index 000000000..b4a7ec151 --- /dev/null +++ b/arch/csky/kernel/ftrace.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
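ftrace.c, next, patches the 14-byte stub that Gcc-csky emits with -pg. The reach check in make_jbsr() exists because bsr32 carries a signed 26-bit half-word displacement, so it only spans ±64MB (±67108864 bytes); anything farther needs the movih/ori/jsr sequence through the reserved r26. How the bsr displacement is split across the two half-words, in isolation (this mirrors the else-branch of make_jbsr() below):

    #include <stdint.h>

    #define BSR_LINK 0xe000

    /* Pack a byte displacement into a bsr32: the offset is stored in
     * half-words (hence >> 1), top 10 bits in the first half-word,
     * low 16 bits in the second.
     */
    static void encode_bsr32(long offset, uint16_t call[2])
    {
            offset >>= 1;
            call[0] = BSR_LINK |
                      ((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
            call[1] = (uint16_t)((unsigned long)offset & 0xffff);
    }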
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define NOP         0x4000
+#define NOP32_HI    0xc400
+#define NOP32_LO    0x4820
+#define PUSH_LR     0x14d0
+#define MOVIH_LINK  0xea3a
+#define ORI_LINK    0xef5a
+#define JSR_LINK    0xe8fa
+#define BSR_LINK    0xe000
+
+/*
+ * Gcc-csky with -pg will insert a stub in the function prologue:
+ *      push    lr
+ *      jbsr    _mcount
+ *      nop32
+ *      nop32
+ *
+ * If the (callee - current_pc) is less than 64MB, we'll use bsr:
+ *      push    lr
+ *      bsr     _mcount
+ *      nop32
+ *      nop32
+ * else we'll use (movih + ori + jsr):
+ *      push    lr
+ *      movih   r26, ...
+ *      ori     r26, ...
+ *      jsr     r26
+ *
+ * (r26 is our reserved link-reg)
+ *
+ */
+static inline void make_jbsr(unsigned long callee, unsigned long pc,
+                             uint16_t *call, bool nolr)
+{
+    long offset;
+
+    call[0] = nolr ? NOP : PUSH_LR;
+
+    offset = (long) callee - (long) pc;
+
+    if (unlikely(offset < -67108864 || offset > 67108864)) {
+        call[1] = MOVIH_LINK;
+        call[2] = callee >> 16;
+        call[3] = ORI_LINK;
+        call[4] = callee & 0xffff;
+        call[5] = JSR_LINK;
+        call[6] = 0;
+    } else {
+        offset = offset >> 1;
+
+        call[1] = BSR_LINK |
+            ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+        call[2] = (uint16_t)((unsigned long) offset & 0xffff);
+        call[3] = call[5] = NOP32_HI;
+        call[4] = call[6] = NOP32_LO;
+    }
+}
+
+static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
+                           NOP32_HI, NOP32_LO};
+static int ftrace_check_current_nop(unsigned long hook)
+{
+    uint16_t olds[7];
+    unsigned long hook_pos = hook - 2;
+
+    if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
+            sizeof(nops)))
+        return -EFAULT;
+
+    if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
+        pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
+            (void *)hook_pos,
+            olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
+            olds[6]);
+
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int ftrace_modify_code(unsigned long hook, unsigned long target,
+                              bool enable, bool nolr)
+{
+    uint16_t call[7];
+
+    unsigned long hook_pos = hook - 2;
+    int ret = 0;
+
+    make_jbsr(target, hook, call, nolr);
+
+    ret = copy_to_kernel_nofault((void *)hook_pos, enable ?
call : nops, + sizeof(nops)); + if (ret) + return -EPERM; + + flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE); + + return 0; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + int ret = ftrace_check_current_nop(rec->ip); + + if (ret) + return ret; + + return ftrace_modify_code(rec->ip, addr, true, false); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, false, false); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + int ret = ftrace_modify_code((unsigned long)&ftrace_call, + (unsigned long)func, true, true); + if (!ret) + ret = ftrace_modify_code((unsigned long)&ftrace_regs_call, + (unsigned long)func, true, true); + return ret; +} + +int __init ftrace_dyn_arch_init(void) +{ + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, addr, true, true); +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + if (!function_graph_enter(old, self_addr, + *(unsigned long *)frame_pointer, parent)) { + /* + * For csky-gcc function has sub-call: + * subi sp, sp, 8 + * stw r8, (sp, 0) + * mov r8, sp + * st.w r15, (sp, 0x4) + * push r15 + * jl _mcount + * We only need set *parent for resume + * + * For csky-gcc function has no sub-call: + * subi sp, sp, 4 + * stw r8, (sp, 0) + * mov r8, sp + * push r15 + * jl _mcount + * We need set *parent and *(frame_pointer + 4) for resume, + * because lr is resumed twice. 
+ */ + *parent = return_hooker; + frame_pointer += 4; + if (*(unsigned long *)frame_pointer == old) + *(unsigned long *)frame_pointer = return_hooker; + } +} + +#ifdef CONFIG_DYNAMIC_FTRACE +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, true, true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_code((unsigned long)&ftrace_graph_call, + (unsigned long)&ftrace_graph_caller, false, true); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_DYNAMIC_FTRACE +#ifndef CONFIG_CPU_HAS_ICACHE_INS +struct ftrace_modify_param { + int command; + atomic_t cpu_count; +}; + +static int __ftrace_modify_code(void *data) +{ + struct ftrace_modify_param *param = data; + + if (atomic_inc_return(¶m->cpu_count) == 1) { + ftrace_modify_all_code(param->command); + atomic_inc(¶m->cpu_count); + } else { + while (atomic_read(¶m->cpu_count) <= num_online_cpus()) + cpu_relax(); + local_icache_inv_all(NULL); + } + + return 0; +} + +void arch_ftrace_update_code(int command) +{ + struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; + + stop_machine(__ftrace_modify_code, ¶m, cpu_online_mask); +} +#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* _mcount is defined in abi's mcount.S */ +EXPORT_SYMBOL(_mcount); diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S new file mode 100644 index 000000000..17ed9d250 --- /dev/null +++ b/arch/csky/kernel/head.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/page.h> +#include <abi/entry.h> + +__HEAD +ENTRY(_start) + SETUP_MMU + + /* set stack point */ + lrw r6, init_thread_union + THREAD_SIZE + mov sp, r6 + + jmpi csky_start +END(_start) + +#ifdef CONFIG_SMP +.align 10 +ENTRY(_start_smp_secondary) + SETUP_MMU + + /* copy msa1 from CPU0 */ + lrw r6, secondary_msa1 + ld.w r6, (r6, 0) + mtcr r6, cr<31, 15> + + /* set stack point */ + lrw r6, secondary_stack + ld.w r6, (r6, 0) + mov sp, r6 + + jmpi csky_start_secondary +END(_start_smp_secondary) +#endif diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c new file mode 100644 index 000000000..03a1930f1 --- /dev/null +++ b/arch/csky/kernel/irq.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <asm/traps.h> +#include <asm/smp.h> + +void __init init_IRQ(void) +{ + irqchip_init(); +#ifdef CONFIG_SMP + setup_smp_ipi(); +#endif +} + +asmlinkage void __irq_entry csky_do_IRQ(struct pt_regs *regs) +{ + handle_arch_irq(regs); +} diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c new file mode 100644 index 000000000..6cd82d69c --- /dev/null +++ b/arch/csky/kernel/module.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
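module.c below applies ELF relocations at module load time. The R_CSKY_ADDR_HI16/LO16 cases both write ((short *)location) + 1, i.e. the second half-word of the 32-bit instruction, which is where the 16-bit immediate of a movih/ori pair sits in this layout. A worked sketch of the arithmetic (hypothetical helper, not from the commit):

    #include <stdint.h>

    /* Patch a movih/ori pair so the target register ends up holding
     * sym + addend -- the same math apply_relocate_add() does below.
     */
    static void patch_hi16_lo16(uint32_t *movih_loc, uint32_t *ori_loc,
                                uint32_t sym, uint32_t addend)
    {
            uint32_t v = sym + addend;

            ((int16_t *)movih_loc)[1] = (int16_t)(v >> 16);    /* ADDR_HI16 */
            ((int16_t *)ori_loc)[1]   = (int16_t)(v & 0xffff); /* ADDR_LO16 */
    }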
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_CPU_CK810
+#define IS_BSR32(hi16, lo16)    (((hi16) & 0xFC00) == 0xE000)
+#define IS_JSRI32(hi16, lo16)   ((hi16) == 0xEAE0)
+
+#define CHANGE_JSRI_TO_LRW(addr) do {                                   \
+    *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a;      \
+    *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF;     \
+} while (0)
+
+#define SET_JSR32_R26(addr) do {                \
+    *(uint16_t *)(addr) = 0xE8Fa;               \
+    *((uint16_t *)(addr) + 1) = 0x0000;         \
+} while (0)
+
+static void jsri_2_lrw_jsr(uint32_t *location)
+{
+    uint16_t *location_tmp = (uint16_t *)location;
+
+    if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
+        return;
+
+    if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
+        /* jsri 0x... --> lrw r26, 0x... */
+        CHANGE_JSRI_TO_LRW(location);
+        /* lsli r0, r0 --> jsr r26 */
+        SET_JSR32_R26(location + 1);
+    }
+}
+#else
+static inline void jsri_2_lrw_jsr(uint32_t *location)
+{
+    return;
+}
+#endif
+
+int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+        unsigned int symindex, unsigned int relsec, struct module *me)
+{
+    unsigned int i;
+    Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+    Elf32_Sym *sym;
+    uint32_t *location;
+    short *temp;
+
+    for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+        /* This is where to make the change */
+        location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                        + rel[i].r_offset;
+        sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+                        + ELF32_R_SYM(rel[i].r_info);
+
+        switch (ELF32_R_TYPE(rel[i].r_info)) {
+        case R_CSKY_32:
+            /* We add the value into the location given */
+            *location = rel[i].r_addend + sym->st_value;
+            break;
+        case R_CSKY_PC32:
+            /* Add the value, subtract its position */
+            *location = rel[i].r_addend + sym->st_value
+                            - (uint32_t)location;
+            break;
+        case R_CSKY_PCRELJSR_IMM11BY2:
+            break;
+        case R_CSKY_PCRELJSR_IMM26BY2:
+            jsri_2_lrw_jsr(location);
+            break;
+        case R_CSKY_ADDR_HI16:
+            temp = ((short *)location) + 1;
+            *temp = (short)
+                ((rel[i].r_addend + sym->st_value) >> 16);
+            break;
+        case R_CSKY_ADDR_LO16:
+            temp = ((short *)location) + 1;
+            *temp = (short)
+                ((rel[i].r_addend + sym->st_value) & 0xffff);
+            break;
+        default:
+            pr_err("module %s: Unknown relocation: %u\n",
+                me->name, ELF32_R_TYPE(rel[i].r_info));
+            return -ENOEXEC;
+        }
+    }
+    return 0;
+}
diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
new file mode 100644
index 000000000..75e1f9df5
--- /dev/null
+++ b/arch/csky/kernel/perf_callchain.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
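perf_callchain.c below unwinds call chains through frame-pointer records. With user code built with -mbacktrace, every frame keeps a two-word record at fp -- the saved fp of the caller and the saved lr -- so the user-space walk reduces to repeated bounded loads. Conceptually (the real code adds access_ok()/__copy_from_user_inatomic() checks, the leaf-function lr fixup, and a depth limit):

    struct frame_record {
            unsigned long fp;   /* caller's frame record */
            unsigned long lr;   /* return address of this frame */
    };

    static void unwind_user(unsigned long fp, void (*emit)(unsigned long))
    {
            while (fp && !(fp & 0x3)) {     /* aligned and non-NULL */
                    struct frame_record *rec = (struct frame_record *)fp;

                    emit(rec->lr);
                    fp = rec->fp;
            }
    }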
+ +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +/* Kernel callchain */ +struct stackframe { + unsigned long fp; + unsigned long lr; +}; + +static int unwind_frame_kernel(struct stackframe *frame) +{ + unsigned long low = (unsigned long)task_stack_page(current); + unsigned long high = low + THREAD_SIZE; + + if (unlikely(frame->fp < low || frame->fp > high)) + return -EPERM; + + if (kstack_end((void *)frame->fp) || frame->fp & 0x3) + return -EPERM; + + *frame = *(struct stackframe *)frame->fp; + + if (__kernel_text_address(frame->lr)) { + int graph = 0; + + frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr, + NULL); + } + return 0; +} + +static void notrace walk_stackframe(struct stackframe *fr, + struct perf_callchain_entry_ctx *entry) +{ + do { + perf_callchain_store(entry, fr->lr); + } while (unwind_frame_kernel(fr) >= 0); +} + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, + unsigned long fp, unsigned long reg_lr) +{ + struct stackframe buftail; + unsigned long lr = 0; + unsigned long __user *user_frame_tail = (unsigned long __user *)fp; + + /* Check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + if (__copy_from_user_inatomic(&buftail, user_frame_tail, + sizeof(buftail))) + return 0; + + if (reg_lr != 0) + lr = reg_lr; + else + lr = buftail.lr; + + fp = buftail.fp; + perf_callchain_store(entry, lr); + + return fp; +} + +/* + * This will be called when the target is in user mode + * This function will only be called when we use + * "PERF_SAMPLE_CALLCHAIN" in + * kernel/events/core.c:perf_prepare_sample() + * + * How to trigger perf_callchain_[user/kernel] : + * $ perf record -e cpu-clock --call-graph fp ./program + * $ perf report --call-graph + * + * On C-SKY platform, the program being sampled and the C library + * need to be compiled with * -mbacktrace, otherwise the user + * stack will not contain function frame. + */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + unsigned long fp = 0; + + /* C-SKY does not support virtualization. */ + if (guest_cbs && guest_cbs->is_in_guest()) + return; + + fp = regs->regs[4]; + perf_callchain_store(entry, regs->pc); + + /* + * While backtrace from leaf function, lr is normally + * not saved inside frame on C-SKY, so get lr from pt_regs + * at the sample point. However, lr value can be incorrect if + * lr is used as temp register + */ + fp = user_backtrace(entry, fp, regs->lr); + + while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + fp = user_backtrace(entry, fp, 0); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + struct stackframe fr; + + /* C-SKY does not support virtualization. */ + if (guest_cbs && guest_cbs->is_in_guest()) { + pr_warn("C-SKY does not support perf in guest mode!"); + return; + } + + fr.fp = regs->regs[4]; + fr.lr = regs->lr; + walk_stackframe(&fr, entry); +} diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c new file mode 100644 index 000000000..1a29f1157 --- /dev/null +++ b/arch/csky/kernel/perf_event.c @@ -0,0 +1,1371 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
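perf_event.c follows; each hardware counter is 64 bits wide but is exposed as a hi/lo pair of 32-bit coprocessor registers, and every read helper repeats the same idiom: read hi, then lo, then hi again, and retry if hi changed in between, which protects against the low word carrying into the high word mid-read. The pattern in isolation (read_hi/read_lo stand in for the cprgr() accessors):

    #include <stdint.h>

    /* Torn-read-safe 64-bit read from a split hi/lo register pair. */
    static uint64_t read_split64(uint32_t (*read_hi)(void),
                                 uint32_t (*read_lo)(void))
    {
            uint32_t lo, hi, tmp;

            do {
                    tmp = read_hi();
                    lo  = read_lo();
                    hi  = read_hi();   /* hi moved => lo may be stale */
            } while (hi != tmp);

            return ((uint64_t)hi << 32) | lo;
    }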
+ +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#define CSKY_PMU_MAX_EVENTS 32 +#define DEFAULT_COUNT_WIDTH 48 + +#define HPCR "<0, 0x0>" /* PMU Control reg */ +#define HPSPR "<0, 0x1>" /* Start PC reg */ +#define HPEPR "<0, 0x2>" /* End PC reg */ +#define HPSIR "<0, 0x3>" /* Soft Counter reg */ +#define HPCNTENR "<0, 0x4>" /* Count Enable reg */ +#define HPINTENR "<0, 0x5>" /* Interrupt Enable reg */ +#define HPOFSR "<0, 0x6>" /* Interrupt Status reg */ + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *events[CSKY_PMU_MAX_EVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)]; +}; + +static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void); +static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); + +static struct csky_pmu_t { + struct pmu pmu; + struct pmu_hw_events __percpu *hw_events; + struct platform_device *plat_device; + uint32_t count_width; + uint32_t hpcr; + u64 max_period; +} csky_pmu; +static int csky_pmu_irq; + +#define to_csky_pmu(p) (container_of(p, struct csky_pmu, pmu)) + +#define cprgr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprgr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwgr(reg, val) \ +({ \ + asm volatile( \ + "cpwgr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +#define cprcr(reg) \ +({ \ + unsigned int tmp; \ + asm volatile("cprcr %0, "reg"\n" \ + : "=r"(tmp) \ + : \ + : "memory"); \ + tmp; \ +}) + +#define cpwcr(reg, val) \ +({ \ + asm volatile( \ + "cpwcr %0, "reg"\n" \ + : \ + : "r"(val) \ + : "memory"); \ +}) + +/* cycle counter */ +static uint64_t csky_pmu_read_cc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x3>"); + lo = cprgr("<0, 0x2>"); + hi = cprgr("<0, 0x3>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cc(uint64_t val) +{ + cpwgr("<0, 0x2>", (uint32_t) val); + cpwgr("<0, 0x3>", (uint32_t) (val >> 32)); +} + +/* instruction counter */ +static uint64_t csky_pmu_read_ic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x5>"); + lo = cprgr("<0, 0x4>"); + hi = cprgr("<0, 0x5>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ic(uint64_t val) +{ + cpwgr("<0, 0x4>", (uint32_t) val); + cpwgr("<0, 0x5>", (uint32_t) (val >> 32)); +} + +/* l1 icache access counter */ +static uint64_t csky_pmu_read_icac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x7>"); + lo = cprgr("<0, 0x6>"); + hi = cprgr("<0, 0x7>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_icac(uint64_t val) +{ + cpwgr("<0, 0x6>", (uint32_t) val); + cpwgr("<0, 0x7>", (uint32_t) (val >> 32)); +} + +/* l1 icache miss counter */ +static uint64_t csky_pmu_read_icmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x9>"); + lo = cprgr("<0, 0x8>"); + hi = cprgr("<0, 0x9>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return 
result; +} + +static void csky_pmu_write_icmc(uint64_t val) +{ + cpwgr("<0, 0x8>", (uint32_t) val); + cpwgr("<0, 0x9>", (uint32_t) (val >> 32)); +} + +/* l1 dcache access counter */ +static uint64_t csky_pmu_read_dcac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xb>"); + lo = cprgr("<0, 0xa>"); + hi = cprgr("<0, 0xb>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcac(uint64_t val) +{ + cpwgr("<0, 0xa>", (uint32_t) val); + cpwgr("<0, 0xb>", (uint32_t) (val >> 32)); +} + +/* l1 dcache miss counter */ +static uint64_t csky_pmu_read_dcmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xd>"); + lo = cprgr("<0, 0xc>"); + hi = cprgr("<0, 0xd>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcmc(uint64_t val) +{ + cpwgr("<0, 0xc>", (uint32_t) val); + cpwgr("<0, 0xd>", (uint32_t) (val >> 32)); +} + +/* l2 cache access counter */ +static uint64_t csky_pmu_read_l2ac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0xf>"); + lo = cprgr("<0, 0xe>"); + hi = cprgr("<0, 0xf>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2ac(uint64_t val) +{ + cpwgr("<0, 0xe>", (uint32_t) val); + cpwgr("<0, 0xf>", (uint32_t) (val >> 32)); +} + +/* l2 cache miss counter */ +static uint64_t csky_pmu_read_l2mc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x11>"); + lo = cprgr("<0, 0x10>"); + hi = cprgr("<0, 0x11>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2mc(uint64_t val) +{ + cpwgr("<0, 0x10>", (uint32_t) val); + cpwgr("<0, 0x11>", (uint32_t) (val >> 32)); +} + +/* I-UTLB miss counter */ +static uint64_t csky_pmu_read_iutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x15>"); + lo = cprgr("<0, 0x14>"); + hi = cprgr("<0, 0x15>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_iutlbmc(uint64_t val) +{ + cpwgr("<0, 0x14>", (uint32_t) val); + cpwgr("<0, 0x15>", (uint32_t) (val >> 32)); +} + +/* D-UTLB miss counter */ +static uint64_t csky_pmu_read_dutlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x17>"); + lo = cprgr("<0, 0x16>"); + hi = cprgr("<0, 0x17>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dutlbmc(uint64_t val) +{ + cpwgr("<0, 0x16>", (uint32_t) val); + cpwgr("<0, 0x17>", (uint32_t) (val >> 32)); +} + +/* JTLB miss counter */ +static uint64_t csky_pmu_read_jtlbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x19>"); + lo = cprgr("<0, 0x18>"); + hi = cprgr("<0, 0x19>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_jtlbmc(uint64_t val) +{ + cpwgr("<0, 0x18>", (uint32_t) val); + cpwgr("<0, 0x19>", (uint32_t) (val >> 32)); +} + +/* software counter */ +static uint64_t csky_pmu_read_softc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1b>"); + lo = cprgr("<0, 0x1a>"); + hi = cprgr("<0, 0x1b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + 
return result; +} + +static void csky_pmu_write_softc(uint64_t val) +{ + cpwgr("<0, 0x1a>", (uint32_t) val); + cpwgr("<0, 0x1b>", (uint32_t) (val >> 32)); +} + +/* conditional branch mispredict counter */ +static uint64_t csky_pmu_read_cbmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1d>"); + lo = cprgr("<0, 0x1c>"); + hi = cprgr("<0, 0x1d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbmc(uint64_t val) +{ + cpwgr("<0, 0x1c>", (uint32_t) val); + cpwgr("<0, 0x1d>", (uint32_t) (val >> 32)); +} + +/* conditional branch instruction counter */ +static uint64_t csky_pmu_read_cbic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x1f>"); + lo = cprgr("<0, 0x1e>"); + hi = cprgr("<0, 0x1f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_cbic(uint64_t val) +{ + cpwgr("<0, 0x1e>", (uint32_t) val); + cpwgr("<0, 0x1f>", (uint32_t) (val >> 32)); +} + +/* indirect branch mispredict counter */ +static uint64_t csky_pmu_read_ibmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x21>"); + lo = cprgr("<0, 0x20>"); + hi = cprgr("<0, 0x21>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibmc(uint64_t val) +{ + cpwgr("<0, 0x20>", (uint32_t) val); + cpwgr("<0, 0x21>", (uint32_t) (val >> 32)); +} + +/* indirect branch instruction counter */ +static uint64_t csky_pmu_read_ibic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x23>"); + lo = cprgr("<0, 0x22>"); + hi = cprgr("<0, 0x23>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_ibic(uint64_t val) +{ + cpwgr("<0, 0x22>", (uint32_t) val); + cpwgr("<0, 0x23>", (uint32_t) (val >> 32)); +} + +/* LSU spec fail counter */ +static uint64_t csky_pmu_read_lsfc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x25>"); + lo = cprgr("<0, 0x24>"); + hi = cprgr("<0, 0x25>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_lsfc(uint64_t val) +{ + cpwgr("<0, 0x24>", (uint32_t) val); + cpwgr("<0, 0x25>", (uint32_t) (val >> 32)); +} + +/* store instruction counter */ +static uint64_t csky_pmu_read_sic(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x27>"); + lo = cprgr("<0, 0x26>"); + hi = cprgr("<0, 0x27>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_sic(uint64_t val) +{ + cpwgr("<0, 0x26>", (uint32_t) val); + cpwgr("<0, 0x27>", (uint32_t) (val >> 32)); +} + +/* dcache read access counter */ +static uint64_t csky_pmu_read_dcrac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x29>"); + lo = cprgr("<0, 0x28>"); + hi = cprgr("<0, 0x29>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrac(uint64_t val) +{ + cpwgr("<0, 0x28>", (uint32_t) val); + cpwgr("<0, 0x29>", (uint32_t) (val >> 32)); +} + +/* dcache read miss counter */ +static uint64_t csky_pmu_read_dcrmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2b>"); + lo = cprgr("<0, 0x2a>"); + hi = cprgr("<0, 
0x2b>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcrmc(uint64_t val) +{ + cpwgr("<0, 0x2a>", (uint32_t) val); + cpwgr("<0, 0x2b>", (uint32_t) (val >> 32)); +} + +/* dcache write access counter */ +static uint64_t csky_pmu_read_dcwac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2d>"); + lo = cprgr("<0, 0x2c>"); + hi = cprgr("<0, 0x2d>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwac(uint64_t val) +{ + cpwgr("<0, 0x2c>", (uint32_t) val); + cpwgr("<0, 0x2d>", (uint32_t) (val >> 32)); +} + +/* dcache write miss counter */ +static uint64_t csky_pmu_read_dcwmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x2f>"); + lo = cprgr("<0, 0x2e>"); + hi = cprgr("<0, 0x2f>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_dcwmc(uint64_t val) +{ + cpwgr("<0, 0x2e>", (uint32_t) val); + cpwgr("<0, 0x2f>", (uint32_t) (val >> 32)); +} + +/* l2cache read access counter */ +static uint64_t csky_pmu_read_l2rac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x31>"); + lo = cprgr("<0, 0x30>"); + hi = cprgr("<0, 0x31>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rac(uint64_t val) +{ + cpwgr("<0, 0x30>", (uint32_t) val); + cpwgr("<0, 0x31>", (uint32_t) (val >> 32)); +} + +/* l2cache read miss counter */ +static uint64_t csky_pmu_read_l2rmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x33>"); + lo = cprgr("<0, 0x32>"); + hi = cprgr("<0, 0x33>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2rmc(uint64_t val) +{ + cpwgr("<0, 0x32>", (uint32_t) val); + cpwgr("<0, 0x33>", (uint32_t) (val >> 32)); +} + +/* l2cache write access counter */ +static uint64_t csky_pmu_read_l2wac(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x35>"); + lo = cprgr("<0, 0x34>"); + hi = cprgr("<0, 0x35>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wac(uint64_t val) +{ + cpwgr("<0, 0x34>", (uint32_t) val); + cpwgr("<0, 0x35>", (uint32_t) (val >> 32)); +} + +/* l2cache write miss counter */ +static uint64_t csky_pmu_read_l2wmc(void) +{ + uint32_t lo, hi, tmp; + uint64_t result; + + do { + tmp = cprgr("<0, 0x37>"); + lo = cprgr("<0, 0x36>"); + hi = cprgr("<0, 0x37>"); + } while (hi != tmp); + + result = (uint64_t) (hi) << 32; + result |= lo; + + return result; +} + +static void csky_pmu_write_l2wmc(uint64_t val) +{ + cpwgr("<0, 0x36>", (uint32_t) val); + cpwgr("<0, 0x37>", (uint32_t) (val >> 32)); +} + +#define HW_OP_UNSUPPORTED 0xffff +static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x1, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x2, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf, + [PERF_COUNT_HW_BRANCH_MISSES] = 0xe, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED, 
+}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xffff +static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x5, + [C(RESULT_MISS)] = 0x6, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0x15, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0x17, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0x4, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x7, + [C(RESULT_MISS)] = 0x8, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x18, + [C(RESULT_MISS)] = 0x19, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x1a, + [C(RESULT_MISS)] = 0x1b, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#endif + }, + [C(DTLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x14, + [C(RESULT_MISS)] = 0xb, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x16, + [C(RESULT_MISS)] = 0xb, + }, +#endif + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { +#ifdef CONFIG_CPU_CK810 + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, +#else + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x3, + [C(RESULT_MISS)] = 0xa, + }, +#endif + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +int 
csky_pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)csky_pmu.max_period) + left = csky_pmu.max_period; + + /* + * The hw event starts counting from this event offset, + * mark it to be able to extract future "deltas": + */ + local64_set(&hwc->prev_count, (u64)(-left)); + + if (hw_raw_write_mapping[hwc->idx] != NULL) + hw_raw_write_mapping[hwc->idx]((u64)(-left) & + csky_pmu.max_period); + + cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR)); + + perf_event_update_userpage(event); + + return ret; +} + +static void csky_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc) +{ + uint64_t prev_raw_count = local64_read(&hwc->prev_count); + /* + * Sign extend count value to 64bit, otherwise delta calculation + * would be incorrect when overflow occurs. + */ + uint64_t new_raw_count = sign_extend64( + hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1); + int64_t delta = new_raw_count - prev_raw_count; + + /* + * We aren't afraid of hwc->prev_count changing beneath our feet + * because there's no way for us to re-enter this function anytime. + */ + local64_set(&hwc->prev_count, new_raw_count); + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void csky_pmu_reset(void *info) +{ + cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1)); +} + +static void csky_pmu_read(struct perf_event *event) +{ + csky_perf_event_update(event, &event->hw); +} + +static int csky_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + return csky_pmu_cache_map[cache_type][cache_op][cache_result]; +} + +static int csky_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int ret; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -ENOENT; + ret = csky_pmu_hw_map[event->attr.config]; + if (ret == HW_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + break; + case PERF_TYPE_HW_CACHE: + ret = csky_pmu_cache_event(event->attr.config); + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + hwc->idx = ret; + break; + case PERF_TYPE_RAW: + if (hw_raw_read_mapping[event->attr.config] == NULL) + return -ENOENT; + hwc->idx = event->attr.config; + break; + default: + return -ENOENT; + } + + if (event->attr.exclude_user) + csky_pmu.hpcr = BIT(2); + else if (event->attr.exclude_kernel) + csky_pmu.hpcr = BIT(3); + else + csky_pmu.hpcr = BIT(2) | BIT(3); + + csky_pmu.hpcr |= BIT(1) | BIT(0); + + return 0; +} + +/* starts all counters */ +static void csky_pmu_enable(struct pmu *pmu) +{ + cpwcr(HPCR, csky_pmu.hpcr); +} + +/* stops all counters */ +static void csky_pmu_disable(struct pmu *pmu) +{ + cpwcr(HPCR, BIT(1)); +} + +static void csky_pmu_start(struct perf_event *event, int flags) +{ + unsigned long flg; + struct 
hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + csky_pmu_event_set_period(event); + + local_irq_save(flg); + + cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR)); + cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR)); + + local_irq_restore(flg); +} + +static void csky_pmu_stop_event(struct perf_event *event) +{ + unsigned long flg; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + local_irq_save(flg); + + cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR)); + cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR)); + + local_irq_restore(flg); +} + +static void csky_pmu_stop(struct perf_event *event, int flags) +{ + if (!(event->hw.state & PERF_HES_STOPPED)) { + csky_pmu_stop_event(event); + event->hw.state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + csky_perf_event_update(event, &event->hw); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void csky_pmu_del(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + csky_pmu_stop(event, PERF_EF_UPDATE); + + hw_events->events[hwc->idx] = NULL; + + perf_event_update_userpage(event); +} + +/* allocate hardware counter and optionally start counting */ +static int csky_pmu_add(struct perf_event *event, int flags) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); + struct hw_perf_event *hwc = &event->hw; + + hw_events->events[hwc->idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + csky_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev) +{ + struct perf_sample_data data; + struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events); + struct pt_regs *regs; + int idx; + + /* + * Did an overflow occur? + */ + if (!cprcr(HPOFSR)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + csky_pmu_disable(&csky_pmu.pmu); + + for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!(cprcr(HPOFSR) & BIT(idx))) + continue; + + hwc = &event->hw; + csky_perf_event_update(event, &event->hw); + perf_sample_data_init(&data, 0, hwc->last_period); + csky_pmu_event_set_period(event); + + if (perf_event_overflow(event, &data, regs)) + csky_pmu_stop_event(event); + } + + csky_pmu_enable(&csky_pmu.pmu); + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. 
+ */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static int csky_pmu_request_irq(irq_handler_t handler) +{ + int err, irqs; + struct platform_device *pmu_device = csky_pmu.plat_device; + + if (!pmu_device) + return -ENODEV; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (irqs < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + csky_pmu_irq = platform_get_irq(pmu_device, 0); + if (csky_pmu_irq < 0) + return -ENODEV; + err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu", + this_cpu_ptr(csky_pmu.hw_events)); + if (err) { + pr_err("unable to request IRQ%d for CSKY PMU counters\n", + csky_pmu_irq); + return err; + } + + return 0; +} + +static void csky_pmu_free_irq(void) +{ + int irq; + struct platform_device *pmu_device = csky_pmu.plat_device; + + irq = platform_get_irq(pmu_device, 0); + if (irq >= 0) + free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events)); +} + +int init_hw_perf_events(void) +{ + csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events, + GFP_KERNEL); + if (!csky_pmu.hw_events) { + pr_info("failed to allocate per-cpu PMU data.\n"); + return -ENOMEM; + } + + csky_pmu.pmu = (struct pmu) { + .pmu_enable = csky_pmu_enable, + .pmu_disable = csky_pmu_disable, + .event_init = csky_pmu_event_init, + .add = csky_pmu_add, + .del = csky_pmu_del, + .start = csky_pmu_start, + .stop = csky_pmu_stop, + .read = csky_pmu_read, + }; + + memset((void *)hw_raw_read_mapping, 0, + sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_read_mapping[0x1] = csky_pmu_read_cc; + hw_raw_read_mapping[0x2] = csky_pmu_read_ic; + hw_raw_read_mapping[0x3] = csky_pmu_read_icac; + hw_raw_read_mapping[0x4] = csky_pmu_read_icmc; + hw_raw_read_mapping[0x5] = csky_pmu_read_dcac; + hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc; + hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac; + hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc; + hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc; + hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc; + hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc; + hw_raw_read_mapping[0xd] = csky_pmu_read_softc; + hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc; + hw_raw_read_mapping[0xf] = csky_pmu_read_cbic; + hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc; + hw_raw_read_mapping[0x11] = csky_pmu_read_ibic; + hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc; + hw_raw_read_mapping[0x13] = csky_pmu_read_sic; + hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac; + hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc; + hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac; + hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc; + hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac; + hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc; + hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac; + hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc; + + memset((void *)hw_raw_write_mapping, 0, + sizeof(hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])); + + hw_raw_write_mapping[0x1] = csky_pmu_write_cc; + hw_raw_write_mapping[0x2] = csky_pmu_write_ic; + hw_raw_write_mapping[0x3] = csky_pmu_write_icac; + hw_raw_write_mapping[0x4] = csky_pmu_write_icmc; + hw_raw_write_mapping[0x5] = csky_pmu_write_dcac; + hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc; + hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac; + hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc; + hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc; + hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc; + hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc; + hw_raw_write_mapping[0xd] = csky_pmu_write_softc; + hw_raw_write_mapping[0xe] = 
csky_pmu_write_cbmc; + hw_raw_write_mapping[0xf] = csky_pmu_write_cbic; + hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc; + hw_raw_write_mapping[0x11] = csky_pmu_write_ibic; + hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc; + hw_raw_write_mapping[0x13] = csky_pmu_write_sic; + hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac; + hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc; + hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac; + hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc; + hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac; + hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc; + hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac; + hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc; + + return 0; +} + +static int csky_pmu_starting_cpu(unsigned int cpu) +{ + enable_percpu_irq(csky_pmu_irq, 0); + return 0; +} + +static int csky_pmu_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(csky_pmu_irq); + return 0; +} + +int csky_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table) +{ + struct device_node *node = pdev->dev.of_node; + int ret; + + ret = init_hw_perf_events(); + if (ret) { + pr_notice("[perf] failed to probe PMU!\n"); + return ret; + } + + if (of_property_read_u32(node, "count-width", + &csky_pmu.count_width)) { + csky_pmu.count_width = DEFAULT_COUNT_WIDTH; + } + csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1; + + csky_pmu.plat_device = pdev; + + /* Ensure the PMU has sane values out of reset. */ + on_each_cpu(csky_pmu_reset, &csky_pmu, 1); + + ret = csky_pmu_request_irq(csky_pmu_handle_irq); + if (ret) { + csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + pr_notice("[perf] PMU request irq fail!\n"); + } + + ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE", + csky_pmu_starting_cpu, + csky_pmu_dying_cpu); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + return ret; + } + + ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW); + if (ret) { + csky_pmu_free_irq(); + free_percpu(csky_pmu.hw_events); + } + + return ret; +} + +static const struct of_device_id csky_pmu_of_device_ids[] = { + {.compatible = "csky,csky-pmu"}, + {}, +}; + +static int csky_pmu_dev_probe(struct platform_device *pdev) +{ + return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids); +} + +static struct platform_driver csky_pmu_driver = { + .driver = { + .name = "csky-pmu", + .of_match_table = csky_pmu_of_device_ids, + }, + .probe = csky_pmu_dev_probe, +}; + +static int __init csky_pmu_probe(void) +{ + int ret; + + ret = platform_driver_register(&csky_pmu_driver); + if (ret) + pr_notice("[perf] PMU initialization failed\n"); + else + pr_notice("[perf] PMU initialization done\n"); + + return ret; +} + +device_initcall(csky_pmu_probe); diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c new file mode 100644 index 000000000..09b7f88a2 --- /dev/null +++ b/arch/csky/kernel/perf_regs.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
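perf_regs.c below relies on csky's pt_regs being laid out as consecutive 32-bit slots, so a sampled register can be fetched by flat index, while REG_RESERVED rejects sample-mask bits at or above PERF_REG_CSKY_MAX. The indexing trick in isolation (sketch only; the real perf_reg_value() bounds-checks idx first):

    #include <stdint.h>

    struct pt_regs;  /* opaque here; consecutive u32 slots on csky */

    static uint64_t reg_value(const struct pt_regs *regs, int idx)
    {
            return (uint64_t)((const uint32_t *)regs)[idx];
    }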
+ +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/bug.h> +#include <asm/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX)) + return 0; + + return (u64)*((u32 *)regs + idx); +} + +#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_32; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c new file mode 100644 index 000000000..923ee4e38 --- /dev/null +++ b/arch/csky/kernel/power.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/reboot.h> + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void machine_power_off(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_halt(void) +{ + local_irq_disable(); + if (pm_power_off) + pm_power_off(); + asm volatile ("bkpt"); +} + +void machine_restart(char *cmd) +{ + local_irq_disable(); + do_kernel_restart(cmd); + asm volatile ("bkpt"); +} diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile new file mode 100644 index 000000000..1c7c6e6cb --- /dev/null +++ b/arch/csky/kernel/probes/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o +obj-$(CONFIG_KPROBES) += kprobes_trampoline.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o +obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o + +CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c new file mode 100644 index 000000000..bbc4edc25 --- /dev/null +++ b/arch/csky/kernel/probes/decode-insn.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kernel.h> +#include <linux/kprobes.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <asm/sections.h> + +#include "decode-insn.h" +#include "simulate-insn.h" + +/* Return: + * INSN_REJECTED If instruction is one not allowed to kprobe, + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 
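+ * INSN_GOOD If instruction is supported and uses its slot.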
+ */ +enum probe_insn __kprobes +csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api) +{ + probe_opcode_t insn = le32_to_cpu(*addr); + + CSKY_INSN_SET_SIMULATE(br16, insn); + CSKY_INSN_SET_SIMULATE(bt16, insn); + CSKY_INSN_SET_SIMULATE(bf16, insn); + CSKY_INSN_SET_SIMULATE(jmp16, insn); + CSKY_INSN_SET_SIMULATE(jsr16, insn); + CSKY_INSN_SET_SIMULATE(lrw16, insn); + CSKY_INSN_SET_SIMULATE(pop16, insn); + + CSKY_INSN_SET_SIMULATE(br32, insn); + CSKY_INSN_SET_SIMULATE(bt32, insn); + CSKY_INSN_SET_SIMULATE(bf32, insn); + CSKY_INSN_SET_SIMULATE(jmp32, insn); + CSKY_INSN_SET_SIMULATE(jsr32, insn); + CSKY_INSN_SET_SIMULATE(lrw32, insn); + CSKY_INSN_SET_SIMULATE(pop32, insn); + + CSKY_INSN_SET_SIMULATE(bez32, insn); + CSKY_INSN_SET_SIMULATE(bnez32, insn); + CSKY_INSN_SET_SIMULATE(bnezad32, insn); + CSKY_INSN_SET_SIMULATE(bhsz32, insn); + CSKY_INSN_SET_SIMULATE(bhz32, insn); + CSKY_INSN_SET_SIMULATE(blsz32, insn); + CSKY_INSN_SET_SIMULATE(blz32, insn); + CSKY_INSN_SET_SIMULATE(bsr32, insn); + CSKY_INSN_SET_SIMULATE(jmpi32, insn); + CSKY_INSN_SET_SIMULATE(jsri32, insn); + + return INSN_GOOD; +} diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h new file mode 100644 index 000000000..9c4ad48fe --- /dev/null +++ b/arch/csky/kernel/probes/decode-insn.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H +#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H + +#include <asm/sections.h> +#include <asm/kprobes.h> + +enum probe_insn { + INSN_REJECTED, + INSN_GOOD_NO_SLOT, + INSN_GOOD, +}; + +#define is_insn32(insn) ((insn & 0xc000) == 0xc000) + +enum probe_insn __kprobes +csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi); + +#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */ diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c new file mode 100644 index 000000000..5264763d0 --- /dev/null +++ b/arch/csky/kernel/probes/ftrace.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kprobes.h> + +int arch_check_ftrace_location(struct kprobe *p) +{ + if (ftrace_location((unsigned long)p->addr)) + p->flags |= KPROBE_FLAG_FTRACE; + return 0; +} + +/* Ftrace callback handler for kprobes -- called with preemption disabled */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + bool lr_saver = false; + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + /* Preempt is disabled by ftrace */ + p = get_kprobe((kprobe_opcode_t *)ip); + if (!p) { + p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE)); + if (unlikely(!p) || kprobe_disabled(p)) + return; + lr_saver = true; + } + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + unsigned long orig_ip = instruction_pointer(regs); + + if (lr_saver) + ip -= MCOUNT_INSN_SIZE; + instruction_pointer_set(regs, ip); + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) { + /* + * Emulate singlestep (and also recover regs->pc) + * as if there is a nop + */ + instruction_pointer_set(regs, + (unsigned long)p->addr + MCOUNT_INSN_SIZE); + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + instruction_pointer_set(regs, orig_ip); + } + /* + * If pre_handler returns !0, it changes regs->pc. We have to + * skip emulating post_handler.
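+ * Just clear current_kprobe below and leave the new regs->pc in place.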
+ */ + __this_cpu_write(current_kprobe, NULL); + } +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.api.insn = NULL; + return 0; +} diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c new file mode 100644 index 000000000..79272dde7 --- /dev/null +++ b/arch/csky/kernel/probes/kprobes.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/kprobes.h> +#include <linux/extable.h> +#include <linux/slab.h> +#include <linux/stop_machine.h> +#include <asm/ptrace.h> +#include <linux/uaccess.h> +#include <asm/sections.h> +#include <asm/cacheflush.h> + +#include "decode-insn.h" + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); + +struct csky_insn_patch { + kprobe_opcode_t *addr; + u32 opcode; + atomic_t cpu_count; +}; + +static int __kprobes patch_text_cb(void *priv) +{ + struct csky_insn_patch *param = priv; + unsigned int addr = (unsigned int)param->addr; + + if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) { + *(u16 *) addr = cpu_to_le16(param->opcode); + dcache_wb_range(addr, addr + 2); + atomic_inc(&param->cpu_count); + } else { + while (atomic_read(&param->cpu_count) <= num_online_cpus()) + cpu_relax(); + } + + icache_inv_range(addr, addr + 2); + + return 0; +} + +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) }; + + return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask); +} + +static void __kprobes arch_prepare_ss_slot(struct kprobe *p) +{ + unsigned long offset = is_insn32(p->opcode) ? 4 : 2; + + p->ainsn.api.restore = (unsigned long)p->addr + offset; + + patch_text(p->ainsn.api.insn, p->opcode); +} + +static void __kprobes arch_prepare_simulate(struct kprobe *p) +{ + p->ainsn.api.restore = 0; +} + +static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (p->ainsn.api.handler) + p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); + + post_kprobe_handler(kcb, regs); +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x1) { + pr_warn("Address not aligned.\n"); + return -EINVAL; + } + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + /* decode instruction */ + switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) { + case INSN_REJECTED: /* insn not supported */ + return -EINVAL; + + case INSN_GOOD_NO_SLOT: /* insn need simulation */ + p->ainsn.api.insn = NULL; + break; + + case INSN_GOOD: /* instruction uses slot */ + p->ainsn.api.insn = get_insn_slot(); + if (!p->ainsn.api.insn) + return -ENOMEM; + break; + } + + /* prepare the instruction */ + if (p->ainsn.api.insn) + arch_prepare_ss_slot(p); + else + arch_prepare_simulate(p); + + return 0; +} + +/* install breakpoint in text */ +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, USR_BKPT); +} + +/* remove breakpoint from text */ +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + patch_text(p->addr, p->opcode); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.api.insn) { + free_insn_slot(p->ainsn.api.insn, 0); + p->ainsn.api.insn = NULL; + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ +
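/* Stash the running kprobe and its status for reentrancy handling. */ +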
kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * Interrupts need to be disabled before single-step mode is set, and not + * reenabled until after single-step mode ends. + * Without disabling interrupt on local CPU, there is a chance of + * interrupt occurrence in the period of exception return and start of + * out-of-line single-step, that result in wrongly single stepping + * into the interrupt handler. + */ +static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + kcb->saved_sr = regs->sr; + regs->sr &= ~BIT(6); +} + +static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, + struct pt_regs *regs) +{ + regs->sr = kcb->saved_sr; +} + +static void __kprobes +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) +{ + unsigned long offset = is_insn32(p->opcode) ? 4 : 2; + + kcb->ss_ctx.ss_pending = true; + kcb->ss_ctx.match_addr = addr + offset; +} + +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) +{ + kcb->ss_ctx.ss_pending = false; + kcb->ss_ctx.match_addr = 0; +} + +#define TRACE_MODE_SI BIT(14) +#define TRACE_MODE_MASK ~(0x3 << 14) +#define TRACE_MODE_RUN 0 + +static void __kprobes setup_singlestep(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + unsigned long slot; + + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + if (p->ainsn.api.insn) { + /* prepare for single stepping */ + slot = (unsigned long)p->ainsn.api.insn; + + set_ss_context(kcb, slot, p); /* mark pending ss */ + + /* IRQs and single stepping do not mix well. */ + kprobes_save_local_irqflag(kcb, regs); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; + instruction_pointer_set(regs, slot); + } else { + /* insn simulation */ + arch_simulate_insn(p, regs); + } +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + + return 1; +} + +static void __kprobes +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + + if (!cur) + return; + + /* return addr restore if non-branching insn */ + if (cur->ainsn.api.restore != 0) + regs->pc = cur->ainsn.api.restore; + + /* restore back original saved kprobe variables and continue */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + return; + } + + /* call post handler */ + kcb->kprobe_status = KPROBE_HIT_SSDONE; + if (cur->post_handler) { + /* post_handler can hit breakpoint and single step + * again, so we enable D-flag for recursive exception. 
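+ * The handler runs with kprobe_status set to KPROBE_HIT_SSDONE.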
+ */ + cur->post_handler(cur, regs, 0); + } + + reset_current_kprobe(); +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the ip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->pc = (unsigned long) cur->addr; + if (!instruction_pointer(regs)) + BUG(); + + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + } + return 0; +} + +int __kprobes +kprobe_breakpoint_handler(struct pt_regs *regs) +{ + struct kprobe *p, *cur_kprobe; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + kcb = get_kprobe_ctlblk(); + cur_kprobe = kprobe_running(); + + p = get_kprobe((kprobe_opcode_t *) addr); + + if (p) { + if (cur_kprobe) { + if (reenter_kprobe(p, regs, kcb)) + return 1; + } else { + /* Probe hit */ + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. + * + * pre_handler can hit a breakpoint and can step thru + * before return. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + } + return 1; + } + + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * Return back to original instruction, and continue. + */ + return 0; +} + +int __kprobes +kprobe_single_step_handler(struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if ((kcb->ss_ctx.ss_pending) + && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) { + clear_ss_context(kcb); /* clear pending ss */ + + kprobes_restore_local_irqflag(kcb, regs); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; + + post_kprobe_handler(kcb, regs); + return 1; + } + return 0; +} + +/* + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). 
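+ * Only the irq entry text range is blacklisted here.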
+ */ +int __init arch_populate_kprobe_blacklist(void) +{ + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + return ret; +} + +void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) +{ + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); +} + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *)regs->lr; + ri->fp = NULL; + regs->lr = (unsigned long) &kretprobe_trampoline; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} + +int __init arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S new file mode 100644 index 000000000..b1fe3af24 --- /dev/null +++ b/arch/csky/kernel/probes/kprobes_trampoline.S @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#include <linux/linkage.h> + +#include <abi/entry.h> + +ENTRY(kretprobe_trampoline) + SAVE_REGS_FTRACE + + mov a0, sp /* pt_regs */ + + jbsr trampoline_probe_handler + + /* use the result as the return-address */ + mov lr, a0 + + RESTORE_REGS_FTRACE + rts +ENDPROC(kretprobe_trampoline) diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c new file mode 100644 index 000000000..4e464fed5 --- /dev/null +++ b/arch/csky/kernel/probes/simulate-insn.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/kprobes.h> + +#include "decode-insn.h" +#include "simulate-insn.h" + +static inline bool csky_insn_reg_get_val(struct pt_regs *regs, + unsigned long index, + unsigned long *ptr) +{ + if (index < 14) + *ptr = *(&regs->a0 + index); + + if (index > 15 && index < 31) + *ptr = *(&regs->exregs[0] + index - 16); + + switch (index) { + case 14: + *ptr = regs->usp; + break; + case 15: + *ptr = regs->lr; + break; + case 31: + *ptr = regs->tls; + break; + default: + goto fail; + } + + return true; +fail: + return false; +} + +static inline bool csky_insn_reg_set_val(struct pt_regs *regs, + unsigned long index, + unsigned long val) +{ + if (index < 14) + *(&regs->a0 + index) = val; + + if (index > 15 && index < 31) + *(&regs->exregs[0] + index - 16) = val; + + switch (index) { + case 14: + regs->usp = val; + break; + case 15: + regs->lr = val; + break; + case 31: + regs->tls = val; + break; + default: + goto fail; + } + + return true; +fail: + return false; +} + +void __kprobes +simulate_br16(u32 opcode, long addr, struct pt_regs *regs) +{ + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); +} + +void __kprobes +simulate_br32(u32 opcode, long addr, struct pt_regs *regs) +{ + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); +} + +void __kprobes +simulate_bt16(u32 opcode, long addr, struct pt_regs *regs) +{ + if (regs->sr & 1) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); + else + instruction_pointer_set(regs, addr + 2); +} + +void __kprobes +simulate_bt32(u32 opcode, long addr, struct pt_regs *regs) +{ + if (regs->sr & 1) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bf16(u32 opcode, long addr, struct pt_regs *regs) +{ + if (!(regs->sr & 1)) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0x3ff)
<< 1, 9)); + else + instruction_pointer_set(regs, addr + 2); +} + +void __kprobes +simulate_bf32(u32 opcode, long addr, struct pt_regs *regs) +{ + if (!(regs->sr & 1)) + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = (opcode >> 2) & 0xf; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 2; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + regs->lr = addr + 4; + + instruction_pointer_set(regs, tmp & 0xfffffffe); +} + +void __kprobes +simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long tmp = (opcode & 0x300) >> 3; + unsigned long offset = ((opcode & 0x1f) | tmp) << 2; + + tmp = (opcode & 0xe0) >> 5; + + val = *(unsigned int *)(instruction_pointer(regs) + offset); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = (opcode & 0xffff0000) >> 14; + unsigned long tmp = opcode & 0x0000001f; + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_pop16(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < (opcode & 0xf); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x10) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_pop32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long *tmp = (unsigned long *)regs->usp; + int i; + + for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) { + csky_insn_reg_set_val(regs, i + 4, *tmp); + tmp += 1; + } + + if (opcode & 0x100000) { + csky_insn_reg_set_val(regs, 15, *tmp); + tmp += 1; + } + + for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) { + csky_insn_reg_set_val(regs, i + 16, *tmp); + tmp += 1; + } + + if (opcode & 0x1000000) { + csky_insn_reg_set_val(regs, 29, *tmp); + tmp += 1; + } + + regs->usp = (unsigned long)tmp; + + instruction_pointer_set(regs, regs->lr); +} + +void __kprobes +simulate_bez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp == 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + + csky_insn_reg_get_val(regs, tmp, &tmp); + + if (tmp != 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) 
>> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); +} + +void __kprobes +simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + val -= 1; + + if (val > 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if (val >= 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if (val > 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if (val <= 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_blz32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp = opcode & 0x1f; + unsigned long val; + + csky_insn_reg_get_val(regs, tmp, &val); + + if (val < 0) { + instruction_pointer_set(regs, + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); + } else + instruction_pointer_set(regs, addr + 4); + + csky_insn_reg_set_val(regs, tmp, val); +} + +void __kprobes +simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long tmp; + + tmp = (opcode & 0xffff) << 16; + tmp |= (opcode & 0xffff0000) >> 16; + + instruction_pointer_set(regs, + addr + sign_extend32((tmp & 0x3ffffff) << 1, 15)); + + regs->lr = addr + 4; +} + +void __kprobes +simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = ((opcode & 0xffff0000) >> 14); + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + instruction_pointer_set(regs, val); +} + +void __kprobes +simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs) +{ + unsigned long val; + unsigned long offset = ((opcode & 0xffff0000) >> 14); + + val = *(unsigned int *) + ((instruction_pointer(regs) + offset) & 0xfffffffc); + + regs->lr = addr + 4; + + instruction_pointer_set(regs, val); +} diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h new file mode 100644 index 000000000..ba4cb7ef0 --- /dev/null +++ b/arch/csky/kernel/probes/simulate-insn.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H +#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H + +#define __CSKY_INSN_FUNCS(name, mask, val) \ +static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \ +{ \ + BUILD_BUG_ON(~(mask) & (val)); \ + return (code & (mask)) == (val); \ +} \ +void simulate_##name(u32 opcode, long addr, struct pt_regs 
*regs); + +#define CSKY_INSN_SET_SIMULATE(name, code) \ + do { \ + if (csky_insn_is_##name(code)) { \ + api->handler = simulate_##name; \ + return INSN_GOOD_NO_SLOT; \ + } \ + } while (0) + +__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400) +__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800) +__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00) +__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800) +__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801) +__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000) +__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480) + +__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800) +__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860) +__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840) +__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0) +__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0) +__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80) +__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0) + +__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900) +__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920) +__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820) +__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0) +__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940) +__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960) +__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980) +__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) +__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0) +__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0) + +#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */ diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c new file mode 100644 index 000000000..1a9e0961b --- /dev/null +++ b/arch/csky/kernel/probes/uprobes.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com> + */ +#include <linux/highmem.h> +#include <linux/ptrace.h> +#include <linux/uprobes.h> +#include <asm/cacheflush.h> + +#include "decode-insn.h" + +#define UPROBE_TRAP_NR UINT_MAX + +bool is_swbp_insn(uprobe_opcode_t *insn) +{ + return (*insn & 0xffff) == UPROBE_SWBP_INSN; +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, + unsigned long addr) +{ + probe_opcode_t insn; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + + auprobe->insn_size = is_insn32(insn) ? 
4 : 2; + + switch (csky_probe_decode_insn(&insn, &auprobe->api)) { + case INSN_REJECTED: + return -EINVAL; + + case INSN_GOOD_NO_SLOT: + auprobe->simulate = true; + break; + + default: + break; + } + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + utask->autask.saved_trap_no = current->thread.trap_no; + current->thread.trap_no = UPROBE_TRAP_NR; + + instruction_pointer_set(regs, utask->xol_vaddr); + + user_enable_single_step(current); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); + + instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); + + user_disable_single_step(current); + + return 0; +} + +bool arch_uprobe_xol_was_trapped(struct task_struct *t) +{ + if (t->thread.trap_no != UPROBE_TRAP_NR) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + probe_opcode_t insn; + unsigned long addr; + + if (!auprobe->simulate) + return false; + + insn = *(probe_opcode_t *)(&auprobe->insn[0]); + addr = instruction_pointer(regs); + + if (auprobe->api.handler) + auprobe->api.handler(insn, addr, regs); + + return true; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* + * Task has received a fatal signal, so reset back to probed + * address. + */ + instruction_pointer_set(regs, utask->vaddr); + + user_disable_single_step(current); +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->usp <= ret->stack; + else + return regs->usp < ret->stack; +} + +unsigned long +arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->lr; + + regs->lr = trampoline_vaddr; + + return ra; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +int uprobe_breakpoint_handler(struct pt_regs *regs) +{ + if (uprobe_pre_sstep_notifier(regs)) + return 1; + + return 0; +} + +int uprobe_single_step_handler(struct pt_regs *regs) +{ + if (uprobe_post_sstep_notifier(regs)) + return 1; + + return 0; +} diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c new file mode 100644 index 000000000..3d0ca22cd --- /dev/null +++ b/arch/csky/kernel/process.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
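+// Thread creation (copy_thread), CPU idle, and coredump register helpers.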
+ +#include <linux/module.h> +#include <linux/version.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/debug.h> +#include <linux/delay.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> +#include <linux/ptrace.h> + +#include <asm/elf.h> +#include <abi/reg_ops.h> + +struct cpuinfo_csky cpu_data[NR_CPUS]; + +#ifdef CONFIG_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + +asmlinkage void ret_from_fork(void); +asmlinkage void ret_from_kernel_thread(void); + +/* + * Some archs flush debug and FPU info here + */ +void flush_thread(void){} + +int copy_thread(unsigned long clone_flags, + unsigned long usp, + unsigned long kthread_arg, + struct task_struct *p, + unsigned long tls) +{ + struct switch_stack *childstack; + struct pt_regs *childregs = task_pt_regs(p); + +#ifdef CONFIG_CPU_HAS_FPU + save_to_user_fp(&p->thread.user_fp); +#endif + + childstack = ((struct switch_stack *) childregs) - 1; + memset(childstack, 0, sizeof(struct switch_stack)); + + /* setup thread.sp for switch_to !!! */ + p->thread.sp = (unsigned long)childstack; + + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { + memset(childregs, 0, sizeof(struct pt_regs)); + childstack->r15 = (unsigned long) ret_from_kernel_thread; + childstack->r10 = kthread_arg; + childstack->r9 = usp; + childregs->sr = mfcr("psr"); + } else { + *childregs = *(current_pt_regs()); + if (usp) + childregs->usp = usp; + if (clone_flags & CLONE_SETTLS) + task_thread_info(p)->tp_value = childregs->tls + = tls; + + childregs->a0 = 0; + childstack->r15 = (unsigned long) ret_from_fork; + } + + return 0; +} + +/* Fill in the fpu structure for a core dump. */ +int dump_fpu(struct pt_regs *regs, struct user_fp *fpu) +{ + memcpy(fpu, &current->thread.user_fp, sizeof(*fpu)); + return 1; +} +EXPORT_SYMBOL(dump_fpu); + +int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) +{ + struct pt_regs *regs = task_pt_regs(tsk); + + /* NOTE: usp is error value. */ + ELF_CORE_COPY_REGS((*pr_regs), regs) + + return 1; +} + +#ifndef CONFIG_CPU_PM_NONE +void arch_cpu_idle(void) +{ +#ifdef CONFIG_CPU_PM_WAIT + asm volatile("wait\n"); +#endif + +#ifdef CONFIG_CPU_PM_DOZE + asm volatile("doze\n"); +#endif + +#ifdef CONFIG_CPU_PM_STOP + asm volatile("stop\n"); +#endif + raw_local_irq_enable(); +} +#endif diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c new file mode 100644 index 000000000..ac07695bc --- /dev/null +++ b/arch/csky/kernel/ptrace.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/audit.h> +#include <linux/elf.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/ptrace.h> +#include <linux/regset.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/signal.h> +#include <linux/smp.h> +#include <linux/tracehook.h> +#include <linux/uaccess.h> +#include <linux/user.h> + +#include <asm/thread_info.h> +#include <asm/page.h> +#include <asm/processor.h> +#include <asm/asm-offsets.h> + +#include <abi/regdef.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + +/* sets the trace bits. */ +#define TRACE_MODE_SI (1 << 14) +#define TRACE_MODE_RUN 0 +#define TRACE_MODE_MASK ~(0x3 << 14) + +/* + * Make sure the single step bit is not set.
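+ * Leaving trace mode also re-enables interrupts (PSR bit 6).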
+ */ +static void singlestep_disable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; + + /* Enable irq */ + regs->sr |= BIT(6); +} + +static void singlestep_enable(struct task_struct *tsk) +{ + struct pt_regs *regs; + + regs = task_pt_regs(tsk); + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; + + /* Disable irq */ + regs->sr &= ~BIT(6); +} + +/* + * Make sure the single step bit is set. + */ +void user_enable_single_step(struct task_struct *child) +{ + singlestep_enable(child); +} + +void user_disable_single_step(struct task_struct *child) +{ + singlestep_disable(child); +} + +enum csky_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct pt_regs *regs = task_pt_regs(target); + + /* Abiv1 regs->tls is fake and we need sync here. */ + regs->tls = task_thread_info(target)->tp_value; + + return membuf_write(&to, regs, sizeof(*regs)); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct pt_regs regs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1); + if (ret) + return ret; + + /* BIT(0) of regs.sr is Condition Code/Carry bit */ + regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0)); +#ifdef CONFIG_CPU_HAS_HILO + regs.dcsr = task_pt_regs(target)->dcsr; +#endif + task_thread_info(target)->tp_value = regs.tls; + + *task_pt_regs(target) = regs; + + return 0; +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; + +#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) + int i; + struct user_fp tmp = *regs; + + for (i = 0; i < 16; i++) { + tmp.vr[i*4] = regs->vr[i*2]; + tmp.vr[i*4 + 1] = regs->vr[i*2 + 1]; + } + + for (i = 0; i < 32; i++) + tmp.vr[64 + i] = regs->vr[32 + i]; + + return membuf_write(&to, &tmp, sizeof(tmp)); +#else + return membuf_write(&to, regs, sizeof(*regs)); +#endif +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; + +#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) + int i; + struct user_fp tmp; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1); + + *regs = tmp; + + for (i = 0; i < 16; i++) { + regs->vr[i*2] = tmp.vr[i*4]; + regs->vr[i*2 + 1] = tmp.vr[i*4 + 1]; + } + + for (i = 0; i < 32; i++) + regs->vr[32 + i] = tmp.vr[64 + i]; +#else + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); +#endif + + return ret; +} + +static const struct user_regset csky_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct pt_regs) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = gpr_get, + .set = gpr_set, + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fp) / sizeof(u32), + .size = sizeof(u32), + .align = sizeof(u32), + .regset_get = fpr_get, + .set = fpr_set, + }, +}; + +static const struct user_regset_view user_csky_view = { + .name = "csky", + .e_machine = ELF_ARCH, + .regsets = csky_regsets, + .n =
ARRAY_SIZE(csky_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_csky_view; +} + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(tls), + REG_OFFSET_NAME(lr), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(sr), + REG_OFFSET_NAME(usp), + REG_OFFSET_NAME(orig_a0), + REG_OFFSET_NAME(a0), + REG_OFFSET_NAME(a1), + REG_OFFSET_NAME(a2), + REG_OFFSET_NAME(a3), + REG_OFFSET_NAME(regs[0]), + REG_OFFSET_NAME(regs[1]), + REG_OFFSET_NAME(regs[2]), + REG_OFFSET_NAME(regs[3]), + REG_OFFSET_NAME(regs[4]), + REG_OFFSET_NAME(regs[5]), + REG_OFFSET_NAME(regs[6]), + REG_OFFSET_NAME(regs[7]), + REG_OFFSET_NAME(regs[8]), + REG_OFFSET_NAME(regs[9]), +#if defined(__CSKYABIV2__) + REG_OFFSET_NAME(exregs[0]), + REG_OFFSET_NAME(exregs[1]), + REG_OFFSET_NAME(exregs[2]), + REG_OFFSET_NAME(exregs[3]), + REG_OFFSET_NAME(exregs[4]), + REG_OFFSET_NAME(exregs[5]), + REG_OFFSET_NAME(exregs[6]), + REG_OFFSET_NAME(exregs[7]), + REG_OFFSET_NAME(exregs[8]), + REG_OFFSET_NAME(exregs[9]), + REG_OFFSET_NAME(exregs[10]), + REG_OFFSET_NAME(exregs[11]), + REG_OFFSET_NAME(exregs[12]), + REG_OFFSET_NAME(exregs[13]), + REG_OFFSET_NAME(exregs[14]), + REG_OFFSET_NAME(rhi), + REG_OFFSET_NAME(rlo), + REG_OFFSET_NAME(dcsr), +#endif + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return (addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
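+ * The entry is only dereferenced after regs_within_kernel_stack() validates it.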
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +void ptrace_disable(struct task_struct *child) +{ + singlestep_disable(child); +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + long ret = -EIO; + + switch (request) { + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage int syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (tracehook_report_syscall_entry(regs)) + return -1; + + if (secure_computing() == -1) + return -1; + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, syscall_get_nr(current, regs)); + + audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3); + return 0; +} + +asmlinkage void syscall_trace_exit(struct pt_regs *regs) +{ + audit_syscall_exit(regs); + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_exit(regs, syscall_get_return_value(current, regs)); +} + +void show_regs(struct pt_regs *fp) +{ + pr_info("\nCURRENT PROCESS:\n\n"); + pr_info("COMM=%s PID=%d\n", current->comm, current->pid); + + if (current->mm) { + pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", + (int) current->mm->start_code, + (int) current->mm->end_code, + (int) current->mm->start_data, + (int) current->mm->end_data, + (int) current->mm->end_data, + (int) current->mm->brk); + pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n", + (int) current->mm->start_stack, + (int) (((unsigned long) current) + 2 * PAGE_SIZE)); + } + + pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); + pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); + pr_info("SP: 0x%08lx\n", (long)fp); + pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); + pr_info("PSR: 0x%08lx\n", (long)fp->sr); + + pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", + fp->a0, fp->a1, fp->a2, fp->a3); +#if defined(__CSKYABIV2__) + pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", + fp->regs[8], fp->regs[9], fp->lr); + pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", + fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); + pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", + fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); + pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", + fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]); + pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", + fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); + pr_info(" hi: 0x%08lx lo: 0x%08lx\n", + fp->rhi, fp->rlo); +#else + pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", + fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); + pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", + fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); + pr_info("r14: 0x%08lx r1: 0x%08lx\n", + fp->regs[8], fp->regs[9]); +#endif + + return; +} diff --git a/arch/csky/kernel/setup.c 
b/arch/csky/kernel/setup.c new file mode 100644 index 000000000..e4cab1605 --- /dev/null +++ b/arch/csky/kernel/setup.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/console.h> +#include <linux/memblock.h> +#include <linux/initrd.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/start_kernel.h> +#include <linux/dma-map-ops.h> +#include <linux/screen_info.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#include <asm/pgalloc.h> + +#ifdef CONFIG_DUMMY_CONSOLE +struct screen_info screen_info = { + .orig_video_lines = 30, + .orig_video_cols = 80, + .orig_video_mode = 0, + .orig_video_ega_bx = 0, + .orig_video_isVGA = 1, + .orig_video_points = 8 +}; +#endif + +static void __init csky_memblock_init(void) +{ + unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); + unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); + unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; + signed long size; + + memblock_reserve(__pa(_stext), _end - _stext); + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + + memblock_dump_all(); + + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + size = max_pfn - min_low_pfn; + + if (size >= lowmem_size) { + max_low_pfn = min_low_pfn + lowmem_size; + write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); + } else if (size > sseg_size) { + max_low_pfn = min_low_pfn + sseg_size; + } + + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; + +#ifdef CONFIG_HIGHMEM + max_zone_pfn[ZONE_HIGHMEM] = max_pfn; + + highstart_pfn = max_low_pfn; + highend_pfn = max_pfn; +#endif + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + + dma_contiguous_reserve(0); + + free_area_init(max_zone_pfn); +} + +void __init setup_arch(char **cmdline_p) +{ + *cmdline_p = boot_command_line; + + console_verbose(); + + pr_info("Phys. 
mem: %ldMB\n", + (unsigned long) memblock_phys_mem_size()/1024/1024); + + init_mm.start_code = (unsigned long) _stext; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = (unsigned long) _end; + + parse_early_param(); + + csky_memblock_init(); + + unflatten_and_copy_device_tree(); + +#ifdef CONFIG_SMP + setup_smp(); +#endif + + sparse_init(); + + fixaddr_init(); + +#ifdef CONFIG_HIGHMEM + kmap_init(); +#endif +} + +unsigned long va_pa_offset; +EXPORT_SYMBOL(va_pa_offset); + +asmlinkage __visible void __init csky_start(unsigned int unused, + void *dtb_start) +{ + /* Clean up bss section */ + memset(__bss_start, 0, __bss_stop - __bss_start); + + va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1); + + pre_trap_init(); + pre_mmu_init(); + + if (dtb_start == NULL) + early_init_dt_scan(__dtb_start); + else + early_init_dt_scan(dtb_start); + + start_kernel(); + + asm volatile("br .\n"); +} diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c new file mode 100644 index 000000000..f7c1677e5 --- /dev/null +++ b/arch/csky/kernel/signal.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/signal.h> +#include <linux/uaccess.h> +#include <linux/syscalls.h> +#include <linux/tracehook.h> + +#include <asm/traps.h> +#include <asm/ucontext.h> +#include <asm/vdso.h> + +#include <abi/regdef.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +static int restore_fpu_state(struct sigcontext __user *sc) +{ + int err = 0; + struct user_fp user_fp; + + err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); + + restore_from_user_fp(&user_fp); + + return err; +} + +static int save_fpu_state(struct sigcontext __user *sc) +{ + struct user_fp user_fp; + + save_to_user_fp(&user_fp); + + return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); +} +#else +#define restore_fpu_state(sigcontext) (0) +#define save_fpu_state(sigcontext) (0) +#endif + +struct rt_sigframe { + /* + * pad[3] is compatible with the same struct defined in + * gcc/libgcc/config/csky/linux-unwind.h + */ + int pad[3]; + struct siginfo info; + struct ucontext uc; +}; + +static long restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) +{ + int err = 0; + unsigned long sr = regs->sr; + + /* sc_pt_regs is structured the same as the start of pt_regs */ + err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs)); + + /* BIT(0) of regs->sr is Condition Code/Carry bit */ + regs->sr = (sr & ~1) | (regs->sr & 1); + + /* Restore the floating-point state. 
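+ * (a no-op when CONFIG_CPU_HAS_FPU is not set).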
*/ + err |= restore_fpu_state(sc); + + return err; +} + +SYSCALL_DEFINE0(rt_sigreturn) +{ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame; + sigset_t set; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + frame = (struct rt_sigframe __user *)regs->usp; + + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return regs->a0; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +static int setup_sigcontext(struct rt_sigframe __user *frame, + struct pt_regs *regs) +{ + struct sigcontext __user *sc = &frame->uc.uc_mcontext; + int err = 0; + + err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); + err |= save_fpu_state(sc); + + return err; +} + +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, size_t framesize) +{ + unsigned long sp; + /* Default to using normal stack */ + sp = regs->usp; + + /* + * If we are on the alternate signal stack and would overflow it, don't. + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) + return (void __user __force *)(-1UL); + + /* This is the X/Open sanctioned signal stack switching. */ + sp = sigsp(sp, ksig) - framesize; + + /* Align the stack frame. */ + sp &= -8UL; + + return (void __user *)sp; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0; + struct csky_vdso *vdso = current->mm->context.vdso; + + frame = get_sigframe(ksig, regs, sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->usp); + err |= setup_sigcontext(frame, regs); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* Set up to return from userspace. */ + regs->lr = (unsigned long)(vdso->rt_signal_retcode); + + /* + * Set up registers for signal handler. + * Registers that we don't modify keep the value they had from + * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). + */ + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + regs->usp = (unsigned long)frame; + regs->a0 = ksig->sig; /* a0: signal number */ + regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */ + regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */ + + return 0; +} + +static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + /* Are we from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* If so, check system call restarting.. 
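+ * regs->a0 holds the syscall return value at this point.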
*/ + switch (regs->a0) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->a0 = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { + regs->a0 = -EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* Set up the stack frame */ + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +static void do_signal(struct pt_regs *regs) +{ + struct ksignal ksig; + + if (get_signal(&ksig)) { + /* Actually deliver the signal */ + handle_signal(&ksig, regs); + return; + } + + /* Did we come from a system call? */ + if (in_syscall(regs)) { + /* Avoid additional syscall restarting via ret_from_exception */ + forget_syscall(regs); + + /* Restart the system call - no handlers present */ + switch (regs->a0) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->a0 = regs->orig_a0; + regs->pc -= TRAP0_SIZE; + break; + case -ERESTART_RESTARTBLOCK: + regs->a0 = regs->orig_a0; + regs_syscallid(regs) = __NR_restart_syscall; + regs->pc -= TRAP0_SIZE; + break; + } + } + + /* + * If there is no signal to deliver, we just put the saved + * sigmask back. + */ + restore_saved_sigmask(); +} + +/* + * notification of userspace execution resumption + * - triggered by the _TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, + unsigned long thread_info_flags) +{ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + /* Handle pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_info_flags & _TIF_NOTIFY_RESUME) { + tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); + } +} diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c new file mode 100644 index 000000000..1a8d7eaf1 --- /dev/null +++ b/arch/csky/kernel/smp.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/kernel_stat.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/irq_work.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/seq_file.h> +#include <linux/sched/task_stack.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <asm/irq.h> +#include <asm/traps.h> +#include <asm/sections.h> +#include <asm/mmu_context.h> +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +enum ipi_message_type { + IPI_EMPTY, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_IRQ_WORK, + IPI_MAX +}; + +struct ipi_data_struct { + unsigned long bits ____cacheline_aligned; + unsigned long stats[IPI_MAX] ____cacheline_aligned; +}; +static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data); + +static irqreturn_t handle_ipi(int irq, void *dev) +{ + unsigned long *stats = this_cpu_ptr(&ipi_data)->stats; + + while (true) { + unsigned long ops; + + ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); + if (ops == 0) + return IRQ_HANDLED; + + if (ops & (1 << IPI_RESCHEDULE)) { + stats[IPI_RESCHEDULE]++; + scheduler_ipi(); + } + + if (ops & (1 << IPI_CALL_FUNC)) { + stats[IPI_CALL_FUNC]++; + generic_smp_call_function_interrupt(); + } + + if (ops & (1 << IPI_IRQ_WORK)) { + stats[IPI_IRQ_WORK]++; + irq_work_run(); + } + + BUG_ON((ops >> IPI_MAX) != 0); + } + + return 
IRQ_HANDLED; +} + +static void (*send_arch_ipi)(const struct cpumask *mask); + +static int ipi_irq; +void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq) +{ + if (send_arch_ipi) + return; + + send_arch_ipi = func; + ipi_irq = irq; +} + +static void +send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + for_each_cpu(i, to_whom) + set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits); + + smp_mb(); + send_arch_ipi(to_whom); +} + +static const char * const ipi_names[] = { + [IPI_EMPTY] = "Empty interrupts", + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_IRQ_WORK] = "Irq work interrupts", +}; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < IPI_MAX; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, + prec >= 4 ? " " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10lu ", + per_cpu_ptr(&ipi_data, cpu)->stats[i]); + seq_printf(p, " %s\n", ipi_names[i]); + } + + return 0; +} + +void arch_send_call_function_ipi_mask(struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_stop(void *unused) +{ + while (1); +} + +void smp_send_stop(void) +{ + on_each_cpu(ipi_stop, NULL, 1); +} + +void smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); +} +#endif + +void __init smp_prepare_boot_cpu(void) +{ +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ +} + +static int ipi_dummy_dev; + +void __init setup_smp_ipi(void) +{ + int rc; + + if (ipi_irq == 0) + return; + + rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", + &ipi_dummy_dev); + if (rc) + panic("%s IRQ request failed\n", __func__); + + enable_percpu_irq(ipi_irq, 0); +} + +void __init setup_smp(void) +{ + struct device_node *node = NULL; + int cpu; + + for_each_of_cpu_node(node) { + if (!of_device_is_available(node)) + continue; + + if (of_property_read_u32(node, "reg", &cpu)) + continue; + + if (cpu >= NR_CPUS) + continue; + + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); + } +} + +extern void _start_smp_secondary(void); + +volatile unsigned int secondary_hint; +volatile unsigned int secondary_hint2; +volatile unsigned int secondary_ccr; +volatile unsigned int secondary_stack; + +unsigned long secondary_msa1; + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + unsigned long mask = 1 << cpu; + + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; + secondary_hint = mfcr("cr31"); + secondary_hint2 = mfcr("cr<21, 1>"); + secondary_ccr = mfcr("cr18"); + secondary_msa1 = read_mmu_msa1(); + + /* + * The other CPUs are still held in reset, so flush this data out + * of the cache to memory before they start running + * csky_start_secondary() and read it. + */ + mtcr("cr17", 0x22); + + if (mask & mfcr("cr<29, 0>")) { + send_arch_ipi(cpumask_of(cpu)); + } else { + /* Enable cpu in SMP reset ctrl reg */ + mask |= mfcr("cr<29, 0>"); + mtcr("cr<29, 0>", mask); + } + + /* Wait for the cpu to come online */ + while (!cpu_online(cpu)); + + secondary_stack = 0; + + return 0; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + +void 
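/* C entry for a secondary CPU released from reset (or kicked by IPI) in __cpu_up() */ 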
csky_start_secondary(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + mtcr("cr31", secondary_hint); + mtcr("cr<21, 1>", secondary_hint2); + mtcr("cr18", secondary_ccr); + + mtcr("vbr", vec_base); + + flush_tlb_all(); + write_mmu_pagemask(0); + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); + TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + + enable_percpu_irq(ipi_irq, 0); + + mmget(mm); + mmgrab(mm); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + + notify_cpu_starting(cpu); + set_cpu_online(cpu, true); + + pr_info("CPU%u Online: %s...\n", cpu, __func__); + + local_irq_enable(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + + irq_migrate_all_off_this_cpu(); + + clear_tasks_mm_cpumask(cpu); + + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + if (!cpu_wait_death(cpu, 5)) { + pr_crit("CPU%u: shutdown failed\n", cpu); + return; + } + pr_notice("CPU%u: shutdown\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + + cpu_report_death(); + + while (!secondary_stack) + arch_cpu_idle(); + + local_irq_disable(); + + asm volatile( + "mov sp, %0\n" + "mov r8, %0\n" + "jmpi csky_start_secondary" + : + : "r" (secondary_stack)); +} +#endif diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c new file mode 100644 index 000000000..16ae20a0a --- /dev/null +++ b/arch/csky/kernel/stacktrace.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/stacktrace.h> +#include <linux/ftrace.h> +#include <linux/ptrace.h> + +#ifdef CONFIG_FRAME_POINTER + +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long fp, sp, pc; + + if (regs) { + fp = frame_pointer(regs); + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + const register unsigned long current_sp __asm__ ("sp"); + const register unsigned long current_fp __asm__ ("r8"); + fp = current_fp; + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + fp = thread_saved_fp(task); + sp = thread_saved_sp(task); + pc = thread_saved_lr(task); + } + + for (;;) { + unsigned long low, high; + struct stackframe *frame; + + if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) + break; + + /* Validate frame pointer */ + low = sp; + high = ALIGN(sp, THREAD_SIZE); + if (unlikely(fp < low || fp > high || fp & 0x3)) + break; + /* Unwind stack frame */ + frame = (struct stackframe *)fp; + sp = fp; + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, + (unsigned long *)(fp - 8)); + } +} + +#else /* !CONFIG_FRAME_POINTER */ + +static void notrace walk_stackframe(struct task_struct *task, + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) +{ + unsigned long sp, pc; + unsigned long *ksp; + + if (regs) { + sp = user_stack_pointer(regs); + pc = instruction_pointer(regs); + } else if (task == NULL || task == current) { + const register unsigned long current_sp __asm__ ("sp"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + /* task blocked in __switch_to */ + sp = thread_saved_sp(task); + pc 
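/* resume scanning from the return address saved at the last context switch */ 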
= thread_saved_lr(task); + } + + if (unlikely(sp & 0x3)) + return; + + ksp = (unsigned long *)sp; + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) + break; + pc = (*ksp++) - 0x4; + } +} +#endif /* CONFIG_FRAME_POINTER */ + +static bool print_trace_address(unsigned long pc, void *arg) +{ + print_ip_sym((const char *)arg, pc); + return false; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_cont("Call Trace:\n"); + walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); +} + +static bool save_wchan(unsigned long pc, void *arg) +{ + if (!in_sched_functions(pc)) { + unsigned long *p = arg; + *p = pc; + return true; + } + return false; +} + +unsigned long get_wchan(struct task_struct *task) +{ + unsigned long pc = 0; + + if (likely(task && task != current && task->state != TASK_RUNNING)) + walk_stackframe(task, NULL, save_wchan, &pc); + return pc; +} + +#ifdef CONFIG_STACKTRACE +static bool __save_trace(unsigned long pc, void *arg, bool nosched) +{ + struct stack_trace *trace = arg; + + if (unlikely(nosched && in_sched_functions(pc))) + return false; + if (unlikely(trace->skip > 0)) { + trace->skip--; + return false; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +static bool save_trace(unsigned long pc, void *arg) +{ + return __save_trace(pc, arg, false); +} + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + walk_stackframe(tsk, NULL, save_trace, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(NULL, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif /* CONFIG_STACKTRACE */ diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c new file mode 100644 index 000000000..3d30e58a4 --- /dev/null +++ b/arch/csky/kernel/syscall.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> + +SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) +{ + struct thread_info *ti = task_thread_info(current); + struct pt_regs *reg = current_pt_regs(); + + reg->tls = addr; + ti->tp_value = addr; + + return 0; +} + +SYSCALL_DEFINE6(mmap2, + unsigned long, addr, + unsigned long, len, + unsigned long, prot, + unsigned long, flags, + unsigned long, fd, + off_t, offset) +{ + if (unlikely(offset & (~PAGE_MASK >> 12))) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, + offset >> (PAGE_SHIFT - 12)); +} + +/* + * For ABIv1, 64-bit arguments must sit in an even/odd register pair, so + * userspace passes advice ahead of them; move it back into the generic + * argument order here. + */ +SYSCALL_DEFINE4(csky_fadvise64_64, + int, fd, + int, advice, + loff_t, offset, + loff_t, len) +{ + return ksys_fadvise64_64(fd, offset, len, advice); +} diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c new file mode 100644 index 000000000..a0c238c53 --- /dev/null +++ b/arch/csky/kernel/syscall_table.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/syscalls.h> +#include <asm/syscalls.h> + +#undef __SYSCALL +#define __SYSCALL(nr, call)[nr] = (call), + +#define sys_fadvise64_64 sys_csky_fadvise64_64 +void * const sys_call_table[__NR_syscalls] __page_aligned_data = { + [0 ... 
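/* range designator: pre-fill the whole table, then let the #include override implemented entries */ 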
__NR_syscalls - 1] = sys_ni_syscall, +#include <asm/unistd.h> +}; diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c new file mode 100644 index 000000000..52379d866 --- /dev/null +++ b/arch/csky/kernel/time.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/clocksource.h> +#include <linux/of_clk.h> + +void __init time_init(void) +{ + of_clk_init(NULL); + timer_probe(); +} diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c new file mode 100644 index 000000000..15711efa1 --- /dev/null +++ b/arch/csky/kernel/traps.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/user.h> +#include <linux/string.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <linux/ptrace.h> +#include <linux/kallsyms.h> +#include <linux/rtc.h> +#include <linux/uaccess.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/sched/debug.h> + +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/pgalloc.h> +#include <asm/siginfo.h> + +#include <asm/mmu_context.h> + +#ifdef CONFIG_CPU_HAS_FPU +#include <abi/fpu.h> +#endif + +int show_unhandled_signals = 1; + +/* Defined in entry.S */ +asmlinkage void csky_trap(void); + +asmlinkage void csky_systemcall(void); +asmlinkage void csky_cmpxchg(void); +asmlinkage void csky_get_tls(void); +asmlinkage void csky_irq(void); + +asmlinkage void csky_tlbinvalidl(void); +asmlinkage void csky_tlbinvalids(void); +asmlinkage void csky_tlbmodified(void); + +/* Defined in head.S */ +asmlinkage void _start_smp_secondary(void); + +void __init pre_trap_init(void) +{ + int i; + + mtcr("vbr", vec_base); + + for (i = 1; i < 128; i++) + VEC_INIT(i, csky_trap); +} + +void __init trap_init(void) +{ + VEC_INIT(VEC_AUTOVEC, csky_irq); + + /* setup trap0 trap2 trap3 */ + VEC_INIT(VEC_TRAP0, csky_systemcall); + VEC_INIT(VEC_TRAP2, csky_cmpxchg); + VEC_INIT(VEC_TRAP3, csky_get_tls); + + /* setup MMU TLB exception */ + VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl); + VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids); + VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified); + +#ifdef CONFIG_CPU_HAS_FPU + init_fpu(); +#endif + +#ifdef CONFIG_SMP + mtcr("cr<28, 0>", virt_to_phys(vec_base)); + + VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary)); +#endif +} + +static DEFINE_SPINLOCK(die_lock); + +void die(struct pt_regs *regs, const char *str) +{ + static int die_counter; + int ret; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + print_modules(); + show_regs(regs); + show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO); + + ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + oops_exit(); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) +{ + struct task_struct *tsk = current; + + if (show_unhandled_signals && unhandled_signal(tsk, signo) + && printk_ratelimit()) { + pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx", + tsk->comm, 
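/* no trailing newline above: print_vma_addr() continues the line via KERN_CONT */ 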
task_pid_nr(tsk), signo, code, addr); + print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); + pr_cont("\n"); + show_regs(regs); + } + + force_sig_fault(signo, code, (void __user *)addr); +} + +static void do_trap_error(struct pt_regs *regs, int signo, int code, + unsigned long addr, const char *str) +{ + current->thread.trap_no = trap_no(regs); + + if (user_mode(regs)) { + do_trap(regs, signo, code, addr); + } else { + if (!fixup_exception(regs)) + die(regs, str); + } +} + +#define DO_ERROR_INFO(name, signo, code, str) \ +asmlinkage __visible void name(struct pt_regs *regs) \ +{ \ + do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \ +} + +DO_ERROR_INFO(do_trap_unknown, + SIGILL, ILL_ILLTRP, "unknown exception"); +DO_ERROR_INFO(do_trap_zdiv, + SIGFPE, FPE_INTDIV, "error zero div exception"); +DO_ERROR_INFO(do_trap_buserr, + SIGSEGV, ILL_ILLADR, "error bus error exception"); + +asmlinkage void do_trap_misaligned(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_NEED_SOFTALIGN + csky_alignment(regs); +#else + current->thread.trap_no = trap_no(regs); + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc, + "Oops - load/store address misaligned"); +#endif +} + +asmlinkage void do_trap_bkpt(struct pt_regs *regs) +{ +#ifdef CONFIG_KPROBES + if (kprobe_single_step_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_single_step_handler(regs)) + return; +#endif + if (user_mode(regs)) { + send_sig(SIGTRAP, current, 0); + return; + } + + do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc, + "Oops - illegal trap exception"); +} + +asmlinkage void do_trap_illinsn(struct pt_regs *regs) +{ + current->thread.trap_no = trap_no(regs); + +#ifdef CONFIG_KPROBES + if (kprobe_breakpoint_handler(regs)) + return; +#endif +#ifdef CONFIG_UPROBES + if (uprobe_breakpoint_handler(regs)) + return; +#endif +#ifndef CONFIG_CPU_NO_USER_BKPT + if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) { + send_sig(SIGTRAP, current, 0); + return; + } +#endif + + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - illegal instruction exception"); +} + +asmlinkage void do_trap_fpe(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + return fpu_fpe(regs); +#else + do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, + "Oops - fpu instruction exception"); +#endif +} + +asmlinkage void do_trap_priv(struct pt_regs *regs) +{ +#ifdef CONFIG_CPU_HAS_FPU + if (user_mode(regs) && fpu_libc_helper(regs)) + return; +#endif + do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc, + "Oops - illegal privileged exception"); +} + +asmlinkage void trap_c(struct pt_regs *regs) +{ + switch (trap_no(regs)) { + case VEC_ZERODIV: + do_trap_zdiv(regs); + break; + case VEC_TRACE: + do_trap_bkpt(regs); + break; + case VEC_ILLEGAL: + do_trap_illinsn(regs); + break; + case VEC_TRAP1: + case VEC_BREAKPOINT: + do_trap_bkpt(regs); + break; + case VEC_ACCESS: + do_trap_buserr(regs); + break; + case VEC_ALIGN: + do_trap_misaligned(regs); + break; + case VEC_FPE: + do_trap_fpe(regs); + break; + case VEC_PRIV: + do_trap_priv(regs); + break; + default: + do_trap_unknown(regs); + break; + } +} diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c new file mode 100644 index 000000000..abc3dbc65 --- /dev/null +++ b/arch/csky/kernel/vdso.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
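+// +// Note: the csky "vdso" is a single shared page that carries only the +// rt_signal_retcode trampoline: init_vdso() below builds it, +// arch_setup_additional_pages() maps it into each new process, and +// setup_rt_frame() in signal.c points the saved lr at it. 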
+ +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/unistd.h> +#include <linux/uaccess.h> + +#include <asm/vdso.h> +#include <asm/cacheflush.h> + +static struct page *vdso_page; + +static int __init init_vdso(void) +{ + struct csky_vdso *vdso; + int err = 0; + + vdso_page = alloc_page(GFP_KERNEL); + if (!vdso_page) + panic("Cannot allocate vdso"); + + vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); + if (!vdso) + panic("Cannot map vdso"); + + clear_page(vdso); + + err = setup_vdso_page(vdso->rt_signal_retcode); + if (err) + panic("Cannot set signal return code, err: %x.", err); + + dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16); + + vunmap(vdso); + + return 0; +} +subsys_initcall(init_vdso); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long addr; + struct mm_struct *mm = current->mm; + + mmap_write_lock(mm); + + addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping( + mm, + addr, + PAGE_SIZE, + VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_page); + if (ret) + goto up_fail; + + mm->context.vdso = (void *)addr; + +up_fail: + mmap_write_unlock(mm); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm == NULL) + return NULL; + + if (vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + else + return NULL; +} diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S new file mode 100644 index 000000000..f03033e17 --- /dev/null +++ b/arch/csky/kernel/vmlinux.lds.S @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> +#include <asm/memory.h> + +OUTPUT_ARCH(csky) +ENTRY(_start) + +#ifndef __cskyBE__ +jiffies = jiffies_64; +#else +jiffies = jiffies_64 + 4; +#endif + +#define VBR_BASE \ + . = ALIGN(1024); \ + vec_base = .; \ + . += 512; + +SECTIONS +{ + . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; + + _stext = .; + __init_begin = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(PAGE_SIZE) + PERCPU_SECTION(L1_CACHE_BYTES) + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = .; + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } = 0 + _etext = .; + + /* __init_begin __init_end must be page aligned for free_initmem */ + . = ALIGN(PAGE_SIZE); + + + _sdata = .; + RO_DATA(PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + +#ifdef CONFIG_HAVE_TCM + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + } + + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) + { + . = ALIGN(4); + __stcm_text_data = .; + *(.tcm.text) + *(.tcm.rodata) +#ifndef CONFIG_HAVE_DTCM + *(.tcm.data) +#endif + . = ALIGN(4); + __etcm_text_data = .; + } + + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); + +#ifdef CONFIG_HAVE_DTCM + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE + + .dtcm_start : { + __dtcm_start = .; + } + + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) + { + . = ALIGN(4); + __stcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __etcm_data = .; + } + + . 
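/* advance the location counter past the DTCM payload */ 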
= ADDR(.dtcm_start) + SIZEOF(.data_tcm); + + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { +#else + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { +#endif + . = ALIGN(PAGE_SIZE); + __tcm_end = .; + } +#endif + + EXCEPTION_TABLE(L1_CACHE_BYTES) + BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) + VBR_BASE + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +}
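
The syscall-restart policy implemented by handle_signal() and do_signal() above can be observed from userspace. The sketch below is illustrative only (plain POSIX C, not part of the patch and not csky-specific): with SA_RESTART clear the interrupted read() fails with EINTR, which is the -ERESTARTSYS path; setting SA_RESTART instead would make the kernel rewind the pc so the read() silently re-executes, and this demo would block.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* exists only to interrupt the blocking syscall */
}

int main(void)
{
	struct sigaction sa;
	int fds[2];
	char c;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;	/* no SA_RESTART: expect EINTR */
	sigaction(SIGALRM, &sa, NULL);

	if (pipe(fds))		/* nothing is ever written to this pipe */
		return 1;
	alarm(1);		/* SIGALRM arrives while read() sleeps */

	if (read(fds[0], &c, 1) < 0)
		printf("read interrupted: %s\n", strerror(errno));

	return 0;
}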