author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39  /arch/csky/kernel
parent     Initial commit.
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/csky/kernel')
-rw-r--r--  arch/csky/kernel/Makefile  20
-rw-r--r--  arch/csky/kernel/asm-offsets.c  83
-rw-r--r--  arch/csky/kernel/atomic.S  61
-rw-r--r--  arch/csky/kernel/cpu-probe.c  79
-rw-r--r--  arch/csky/kernel/entry.S  283
-rw-r--r--  arch/csky/kernel/ftrace.c  234
-rw-r--r--  arch/csky/kernel/head.S  42
-rw-r--r--  arch/csky/kernel/io.c  91
-rw-r--r--  arch/csky/kernel/irq.c  17
-rw-r--r--  arch/csky/kernel/jump_label.c  54
-rw-r--r--  arch/csky/kernel/module.c  97
-rw-r--r--  arch/csky/kernel/perf_callchain.c  114
-rw-r--r--  arch/csky/kernel/perf_event.c  1371
-rw-r--r--  arch/csky/kernel/perf_regs.c  39
-rw-r--r--  arch/csky/kernel/power.c  28
-rw-r--r--  arch/csky/kernel/probes/Makefile  7
-rw-r--r--  arch/csky/kernel/probes/decode-insn.c  49
-rw-r--r--  arch/csky/kernel/probes/decode-insn.h  20
-rw-r--r--  arch/csky/kernel/probes/ftrace.c  67
-rw-r--r--  arch/csky/kernel/probes/kprobes.c  412
-rw-r--r--  arch/csky/kernel/probes/kprobes_trampoline.S  19
-rw-r--r--  arch/csky/kernel/probes/simulate-insn.c  390
-rw-r--r--  arch/csky/kernel/probes/simulate-insn.h  49
-rw-r--r--  arch/csky/kernel/probes/uprobes.c  155
-rw-r--r--  arch/csky/kernel/process.c  105
-rw-r--r--  arch/csky/kernel/ptrace.c  521
-rw-r--r--  arch/csky/kernel/setup.c  134
-rw-r--r--  arch/csky/kernel/signal.c  269
-rw-r--r--  arch/csky/kernel/smp.c  321
-rw-r--r--  arch/csky/kernel/stacktrace.c  158
-rw-r--r--  arch/csky/kernel/syscall.c  43
-rw-r--r--  arch/csky/kernel/syscall_table.c  14
-rw-r--r--  arch/csky/kernel/time.c  11
-rw-r--r--  arch/csky/kernel/traps.c  262
-rw-r--r--  arch/csky/kernel/vdso.c  107
-rw-r--r--  arch/csky/kernel/vdso/.gitignore  4
-rw-r--r--  arch/csky/kernel/vdso/Makefile  72
-rw-r--r--  arch/csky/kernel/vdso/note.S  12
-rw-r--r--  arch/csky/kernel/vdso/rt_sigreturn.S  14
-rwxr-xr-x  arch/csky/kernel/vdso/so2s.sh  5
-rw-r--r--  arch/csky/kernel/vdso/vdso.S  16
-rw-r--r--  arch/csky/kernel/vdso/vdso.lds.S  58
-rw-r--r--  arch/csky/kernel/vdso/vgettimeofday.c  28
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S  116
44 files changed, 6051 insertions, 0 deletions
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
new file mode 100644
index 000000000..8a868316b
--- /dev/null
+++ b/arch/csky/kernel/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+extra-y := vmlinux.lds
+
+obj-y += head.o entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/
+obj-y += power.o syscall.o syscall_table.o setup.o io.o
+obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
+obj-y += probes/
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
+obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
+obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
new file mode 100644
index 000000000..d1e903579
--- /dev/null
+++ b/arch/csky/kernel/asm-offsets.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
+#include <abi/regdef.h>
+
+int main(void)
+{
+ /* offsets into the task struct */
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into the thread struct */
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp));
+ DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr));
+ DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr));
+ DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr));
+
+ /* offsets into the thread_info struct */
+ DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value));
+ DEFINE(TINFO_TASK, offsetof(struct thread_info, task));
+
+ /* offsets into the pt_regs */
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0));
+ DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+
+ DEFINE(PT_A0, offsetof(struct pt_regs, a0));
+ DEFINE(PT_A1, offsetof(struct pt_regs, a1));
+ DEFINE(PT_A2, offsetof(struct pt_regs, a2));
+ DEFINE(PT_A3, offsetof(struct pt_regs, a3));
+ DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0]));
+ DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1]));
+ DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2]));
+ DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3]));
+ DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4]));
+ DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5]));
+ DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6]));
+ DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7]));
+ DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8]));
+ DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9]));
+ DEFINE(PT_R15, offsetof(struct pt_regs, lr));
+#if defined(__CSKYABIV2__)
+ DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0]));
+ DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1]));
+ DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2]));
+ DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3]));
+ DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4]));
+ DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5]));
+ DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6]));
+ DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7]));
+ DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8]));
+ DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9]));
+ DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10]));
+ DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11]));
+ DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12]));
+ DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13]));
+ DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14]));
+ DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15]));
+ DEFINE(PT_RHI, offsetof(struct pt_regs, rhi));
+ DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
+#endif
+ DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+ DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
+ __softirq_pending));
+
+ /* signal defines */
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(SIGTRAP, SIGTRAP);
+
+ return 0;
+}
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
new file mode 100644
index 000000000..e73e548f7
--- /dev/null
+++ b/arch/csky/kernel/atomic.S
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+
+.text
+
+/*
+ * int csky_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * If *ptr != oldval, return 1;
+ * else set *ptr = newval and return 0.
+ */
+ENTRY(csky_cmpxchg)
+ USPTOKSP
+
+ RD_MEH a3
+ WR_MEH a3
+
+ mfcr a3, epc
+ addi a3, TRAP0_SIZE
+
+ subi sp, 16
+ stw a3, (sp, 0)
+ mfcr a3, epsr
+ stw a3, (sp, 4)
+ mfcr a3, usp
+ stw a3, (sp, 8)
+
+ psrset ee
+#ifdef CONFIG_CPU_HAS_LDSTEX
+1:
+ ldex a3, (a2)
+ cmpne a0, a3
+ bt16 2f
+ mov a3, a1
+ stex a3, (a2)
+ bez a3, 1b
+2:
+ sync.is
+#else
+GLOBAL(csky_cmpxchg_ldw)
+ ldw a3, (a2)
+ cmpne a0, a3
+ bt16 3f
+GLOBAL(csky_cmpxchg_stw)
+ stw a1, (a2)
+3:
+#endif
+ mvc a0
+ ldw a3, (sp, 0)
+ mtcr a3, epc
+ ldw a3, (sp, 4)
+ mtcr a3, epsr
+ ldw a3, (sp, 8)
+ mtcr a3, usp
+ addi sp, 16
+ KSPTOUSP
+ rte
+END(csky_cmpxchg)
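
For reference, a minimal C sketch of the semantics the trap handler above implements, matching the prototype in the comment (an illustration only; the real version must run atomically in the trap path):

    static int csky_cmpxchg_sketch(int oldval, int newval, int *ptr)
    {
            if (*ptr != oldval)
                    return 1;       /* mismatch: nothing stored */
            *ptr = newval;          /* match: store newval */
            return 0;
    }
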
diff --git a/arch/csky/kernel/cpu-probe.c b/arch/csky/kernel/cpu-probe.c
new file mode 100644
index 000000000..5f15ca31d
--- /dev/null
+++ b/arch/csky/kernel/cpu-probe.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+
+#include <abi/reg_ops.h>
+
+static void percpu_print(void *arg)
+{
+ struct seq_file *m = (struct seq_file *)arg;
+ unsigned int cur, next, i;
+
+ seq_printf(m, "processor : %d\n", smp_processor_id());
+ seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME);
+
+	/* read the processor id regs; cap at 100 reads */
+ cur = mfcr("cr13");
+ for (i = 0; i < 100; i++) {
+ seq_printf(m, "product info[%d] : 0x%08x\n", i, cur);
+
+ next = mfcr("cr13");
+
+		/* some CPUs only have one id reg */
+ if (cur == next)
+ break;
+
+ cur = next;
+
+		/* cpid index (bits 31-28) wrapped; spin until it reads back as i */
+ if (!(next >> 28)) {
+ while ((mfcr("cr13") >> 28) != i);
+ break;
+ }
+ }
+
+	/* CPU feature regs, set up by bootloader or gdbinit */
+ seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint());
+ seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18"));
+ seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2());
+ seq_printf(m, "\n");
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, percpu_print, m, true);
+
+#ifdef CSKY_ARCH_VERSION
+ seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION);
+ seq_printf(m, "\n");
+#endif
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v) {}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
+};
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
new file mode 100644
index 000000000..547b4cd1b
--- /dev/null
+++ b/arch/csky/kernel/entry.S
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/linkage.h>
+#include <abi/entry.h>
+#include <abi/pgtable-bits.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/unistd.h>
+#include <asm/asm-offsets.h>
+#include <linux/threads.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+.macro zero_fp
+#ifdef CONFIG_STACKTRACE
+ movi r8, 0
+#endif
+.endm
+
+.macro context_tracking
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ mfcr a0, epsr
+ btsti a0, 31
+ bt 1f
+ jbsr user_exit_callable
+ ldw a0, (sp, LSAVE_A0)
+ ldw a1, (sp, LSAVE_A1)
+ ldw a2, (sp, LSAVE_A2)
+ ldw a3, (sp, LSAVE_A3)
+#if defined(__CSKYABIV1__)
+ ldw r6, (sp, LSAVE_A4)
+ ldw r7, (sp, LSAVE_A5)
+#endif
+1:
+#endif
+.endm
+
+.text
+ENTRY(csky_pagefault)
+ SAVE_ALL 0
+ zero_fp
+ context_tracking
+ psrset ee
+ mov a0, sp
+ jbsr do_page_fault
+ jmpi ret_from_exception
+
+ENTRY(csky_systemcall)
+ SAVE_ALL TRAP0_SIZE
+ zero_fp
+ context_tracking
+ psrset ee, ie
+
+ lrw r9, __NR_syscalls
+ cmphs syscallid, r9 /* Check nr of syscall */
+ bt 1f
+
+ lrw r9, sys_call_table
+ ixw r9, syscallid
+ ldw syscallid, (r9)
+ cmpnei syscallid, 0
+ bf ret_from_exception
+
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
+ bt csky_syscall_trace
+#if defined(__CSKYABIV2__)
+ subi sp, 8
+ stw r5, (sp, 0x4)
+ stw r4, (sp, 0x0)
+ jsr syscallid /* Do system call */
+ addi sp, 8
+#else
+ jsr syscallid
+#endif
+ stw a0, (sp, LSAVE_A0) /* Save return value */
+1:
+#ifdef CONFIG_DEBUG_RSEQ
+ mov a0, sp
+ jbsr rseq_syscall
+#endif
+ jmpi ret_from_exception
+
+csky_syscall_trace:
+ mov a0, sp /* sp = pt_regs pointer */
+ jbsr syscall_trace_enter
+ cmpnei a0, 0
+ bt 1f
+	/* Prepare args before doing the system call */
+ ldw a0, (sp, LSAVE_A0)
+ ldw a1, (sp, LSAVE_A1)
+ ldw a2, (sp, LSAVE_A2)
+ ldw a3, (sp, LSAVE_A3)
+#if defined(__CSKYABIV2__)
+ subi sp, 8
+ ldw r9, (sp, LSAVE_A4)
+ stw r9, (sp, 0x0)
+ ldw r9, (sp, LSAVE_A5)
+ stw r9, (sp, 0x4)
+ jsr syscallid /* Do system call */
+ addi sp, 8
+#else
+ ldw r6, (sp, LSAVE_A4)
+ ldw r7, (sp, LSAVE_A5)
+ jsr syscallid /* Do system call */
+#endif
+ stw a0, (sp, LSAVE_A0) /* Save return value */
+
+1:
+#ifdef CONFIG_DEBUG_RSEQ
+ mov a0, sp
+ jbsr rseq_syscall
+#endif
+ mov a0, sp /* right now, sp --> pt_regs */
+ jbsr syscall_trace_exit
+ br ret_from_exception
+
+ENTRY(ret_from_kernel_thread)
+ jbsr schedule_tail
+ mov a0, r10
+ jsr r9
+ jbsr ret_from_exception
+
+ENTRY(ret_from_fork)
+ jbsr schedule_tail
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
+ bf ret_from_exception
+ mov a0, sp /* sp = pt_regs pointer */
+ jbsr syscall_trace_exit
+
+ret_from_exception:
+ psrclr ie
+ ld r9, (sp, LSAVE_PSR)
+ btsti r9, 31
+
+ bt 1f
+	/*
+	 * Mask sp down to the current thread_info, then load its
+	 * flags and test them against _TIF_WORK_MASK.
+	 */
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_WORK_MASK
+ and r10, r9
+ cmpnei r10, 0
+ bt exit_work
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ jbsr user_enter_callable
+#endif
+1:
+#ifdef CONFIG_PREEMPTION
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+
+ ldw r10, (r9, TINFO_PREEMPT)
+ cmpnei r10, 0
+ bt 2f
+ jbsr preempt_schedule_irq /* irq en/disable is done inside */
+2:
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ld r10, (sp, LSAVE_PSR)
+ btsti r10, 6
+ bf 2f
+ jbsr trace_hardirqs_on
+2:
+#endif
+ RESTORE_ALL
+
+exit_work:
+ lrw r9, ret_from_exception
+ mov lr, r9
+
+ btsti r10, TIF_NEED_RESCHED
+ bt work_resched
+
+ psrset ie
+ mov a0, sp
+ mov a1, r10
+ jmpi do_notify_resume
+
+work_resched:
+ jmpi schedule
+
+ENTRY(csky_trap)
+ SAVE_ALL 0
+ zero_fp
+ context_tracking
+ psrset ee
+ mov a0, sp /* Push Stack pointer arg */
+ jbsr trap_c /* Call C-level trap handler */
+ jmpi ret_from_exception
+
+/*
+ * Prototype from libc for abiv1:
+ * register unsigned int __result asm("a0");
+ * asm( "trap 3" :"=r"(__result)::);
+ */
+ENTRY(csky_get_tls)
+ USPTOKSP
+
+ RD_MEH a0
+ WR_MEH a0
+
+	/* advance epc past the trap so execution continues after it */
+ mfcr a0, epc
+ addi a0, TRAP0_SIZE
+ mtcr a0, epc
+
+ /* get current task thread_info with kernel 8K stack */
+ bmaski a0, THREAD_SHIFT
+ not a0
+ subi sp, 1
+ and a0, sp
+ addi sp, 1
+
+ /* get tls */
+ ldw a0, (a0, TINFO_TP_VALUE)
+
+ KSPTOUSP
+ rte
+
+ENTRY(csky_irq)
+ SAVE_ALL 0
+ zero_fp
+ context_tracking
+ psrset ee
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ jbsr trace_hardirqs_off
+#endif
+
+ mov a0, sp
+ jbsr generic_handle_arch_irq
+
+ jmpi ret_from_exception
+
+/*
+ * a0 = prev task_struct *
+ * a1 = next task_struct *
+ * (next is returned in a0)
+ */
+ENTRY(__switch_to)
+ lrw a3, TASK_THREAD
+ addu a3, a0
+
+ SAVE_SWITCH_STACK
+
+ stw sp, (a3, THREAD_KSP)
+
+ /* Set up next process to run */
+ lrw a3, TASK_THREAD
+ addu a3, a1
+
+ ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */
+
+#if defined(__CSKYABIV2__)
+ addi a3, a1, TASK_THREAD_INFO
+ ldw tls, (a3, TINFO_TP_VALUE)
+#endif
+
+ RESTORE_SWITCH_STACK
+
+ rts
+ENDPROC(__switch_to)
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
new file mode 100644
index 000000000..50bfcf129
--- /dev/null
+++ b/arch/csky/kernel/ftrace.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define NOP 0x4000
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define PUSH_LR 0x14d0
+#define MOVIH_LINK 0xea3a
+#define ORI_LINK 0xef5a
+#define JSR_LINK 0xe8fa
+#define BSR_LINK 0xe000
+
+/*
+ * gcc-csky with -pg inserts a stub in the function prologue:
+ *     push lr
+ *     jbsr _mcount
+ *     nop32
+ *     nop32
+ *
+ * If (callee - current_pc) is within the ±64MB bsr range, we'll use bsr:
+ *     push lr
+ *     bsr _mcount
+ *     nop32
+ *     nop32
+ * else we'll use (movih + ori + jsr):
+ *     push lr
+ *     movih r26, ...
+ *     ori r26, ...
+ *     jsr r26
+ *
+ * (r26 is our reserved link-reg)
+ */
+static inline void make_jbsr(unsigned long callee, unsigned long pc,
+ uint16_t *call, bool nolr)
+{
+ long offset;
+
+ call[0] = nolr ? NOP : PUSH_LR;
+
+ offset = (long) callee - (long) pc;
+
+ if (unlikely(offset < -67108864 || offset > 67108864)) {
+ call[1] = MOVIH_LINK;
+ call[2] = callee >> 16;
+ call[3] = ORI_LINK;
+ call[4] = callee & 0xffff;
+ call[5] = JSR_LINK;
+ call[6] = 0;
+ } else {
+ offset = offset >> 1;
+
+ call[1] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ call[2] = (uint16_t)((unsigned long) offset & 0xffff);
+ call[3] = call[5] = NOP32_HI;
+ call[4] = call[6] = NOP32_LO;
+ }
+}
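
The far/near decision above reduces to a signed-displacement range check. A standalone sketch of that test, using the same ±64MB (67108864-byte) constants as the code:

    /* bsr is usable only when the byte displacement fits in roughly ±64MB. */
    static bool bsr_reachable(unsigned long callee, unsigned long pc)
    {
            long offset = (long)callee - (long)pc;

            return offset >= -67108864 && offset <= 67108864;
    }
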
+
+static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
+ NOP32_HI, NOP32_LO};
+static int ftrace_check_current_nop(unsigned long hook)
+{
+ uint16_t olds[7];
+ unsigned long hook_pos = hook - 2;
+
+ if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
+ sizeof(nops)))
+ return -EFAULT;
+
+ if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
+ pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
+ (void *)hook_pos,
+ olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
+ olds[6]);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ftrace_modify_code(unsigned long hook, unsigned long target,
+ bool enable, bool nolr)
+{
+ uint16_t call[7];
+
+ unsigned long hook_pos = hook - 2;
+ int ret = 0;
+
+ make_jbsr(target, hook, call, nolr);
+
+ ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
+ sizeof(nops));
+ if (ret)
+ return -EPERM;
+
+ flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret = ftrace_check_current_nop(rec->ip);
+
+ if (ret)
+ return ret;
+
+ return ftrace_modify_code(rec->ip, addr, true, false);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ return ftrace_modify_code(rec->ip, addr, false, false);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ int ret = ftrace_modify_code((unsigned long)&ftrace_call,
+ (unsigned long)func, true, true);
+ if (!ret)
+ ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
+ (unsigned long)func, true, true);
+ return ret;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return ftrace_modify_code(rec->ip, addr, true, true);
+}
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+
+ if (!function_graph_enter(old, self_addr,
+ *(unsigned long *)frame_pointer, parent)) {
+		/*
+		 * For a csky-gcc function with sub-calls:
+		 * subi sp, sp, 8
+		 * stw r8, (sp, 0)
+		 * mov r8, sp
+		 * st.w r15, (sp, 0x4)
+		 * push r15
+		 * jl _mcount
+		 * we only need to set *parent to resume.
+		 *
+		 * For a csky-gcc leaf function (no sub-calls):
+		 * subi sp, sp, 4
+		 * stw r8, (sp, 0)
+		 * mov r8, sp
+		 * push r15
+		 * jl _mcount
+		 * we must set both *parent and *(frame_pointer + 4),
+		 * because lr is restored twice on return.
+		 */
+ *parent = return_hooker;
+ frame_pointer += 4;
+ if (*(unsigned long *)frame_pointer == old)
+ *(unsigned long *)frame_pointer = return_hooker;
+ }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code((unsigned long)&ftrace_graph_call,
+ (unsigned long)&ftrace_graph_caller, true, true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code((unsigned long)&ftrace_graph_call,
+ (unsigned long)&ftrace_graph_caller, false, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#ifndef CONFIG_CPU_HAS_ICACHE_INS
+struct ftrace_modify_param {
+ int command;
+ atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+ struct ftrace_modify_param *param = data;
+
+ if (atomic_inc_return(&param->cpu_count) == 1) {
+ ftrace_modify_all_code(param->command);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ local_icache_inv_all(NULL);
+ }
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+ stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
+#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/* _mcount is defined in abi's mcount.S */
+EXPORT_SYMBOL(_mcount);
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
new file mode 100644
index 000000000..7e3e4f15b
--- /dev/null
+++ b/arch/csky/kernel/head.S
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/page.h>
+#include <abi/entry.h>
+
+__HEAD
+ENTRY(_start)
+ SETUP_MMU
+
+	/* set stack pointer */
+ lrw r6, init_thread_union + THREAD_SIZE
+ mov sp, r6
+
+ jmpi csky_start
+END(_start)
+
+#ifdef CONFIG_SMP
+.align 10
+ENTRY(_start_smp_secondary)
+ SETUP_MMU
+
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ lrw r6, secondary_msa1
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<31, 15>
+#endif
+
+ lrw r6, secondary_pgd
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<28, 15>
+ mtcr r6, cr<29, 15>
+
+	/* set stack pointer */
+ lrw r6, secondary_stack
+ ld.w r6, (r6, 0)
+ mov sp, r6
+
+ jmpi csky_start_secondary
+END(_start_smp_secondary)
+#endif
diff --git a/arch/csky/kernel/io.c b/arch/csky/kernel/io.c
new file mode 100644
index 000000000..5883f13fa
--- /dev/null
+++ b/arch/csky/kernel/io.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 4)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 4) {
+ *(u32 *)to = __raw_readl(from);
+ from += 4;
+ to += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
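
As a usage illustration, a driver would typically reach this helper through the memcpy_fromio() wrapper; the MMIO address below is made up:

    u8 buf[64];
    void __iomem *regs = ioremap(0xffc00000, sizeof(buf)); /* hypothetical address */

    if (regs) {
            memcpy_fromio(buf, regs, sizeof(buf)); /* lands in __memcpy_fromio */
            iounmap(regs);
    }
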
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 4)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 4) {
+ __raw_writel(*(u32 *)from, to);
+ from += 4;
+ to += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ u32 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 4)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 4) {
+ __raw_writel(qc, dst);
+ dst += 4;
+ count -= 4;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
diff --git a/arch/csky/kernel/irq.c b/arch/csky/kernel/irq.c
new file mode 100644
index 000000000..fcdaf3156
--- /dev/null
+++ b/arch/csky/kernel/irq.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+
+void __init init_IRQ(void)
+{
+ irqchip_init();
+#ifdef CONFIG_SMP
+ setup_smp_ipi();
+#endif
+}
diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c
new file mode 100644
index 000000000..d0e8b2144
--- /dev/null
+++ b/arch/csky/kernel/jump_label.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define BSR_LINK 0xe000
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ unsigned long addr = jump_entry_code(entry);
+ u16 insn[2];
+ int ret = 0;
+
+ if (type == JUMP_LABEL_JMP) {
+ long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+ if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
+ return;
+
+ offset = offset >> 1;
+
+ insn[0] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
+ } else {
+ insn[0] = NOP32_HI;
+ insn[1] = NOP32_LO;
+ }
+
+ ret = copy_to_kernel_nofault((void *)addr, insn, 4);
+ WARN_ON(ret);
+
+ flush_icache_range(addr, addr + 4);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the same instructions in the arch_static_branch and
+ * arch_static_branch_jump inline functions, so there's no
+ * need to patch them up here.
+ * The core will call arch_jump_label_transform when those
+ * instructions need to be replaced.
+ */
+ arch_jump_label_transform(entry, type);
+}
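
For context, a sketch of the generic static-key usage that exercises this patching path (standard kernel API; the key and function names are illustrative):

    static DEFINE_STATIC_KEY_FALSE(example_key);

    void example_fast_path(void)
    {
            /* Compiles to the nop32 pair above; patched to bsr32 once
             * static_branch_enable(&example_key) is called. */
            if (static_branch_unlikely(&example_key))
                    pr_info("slow path enabled\n");
    }
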
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
new file mode 100644
index 000000000..f11b3e573
--- /dev/null
+++ b/arch/csky/kernel/module.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_CPU_CK810
+#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
+#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
+
+#define CHANGE_JSRI_TO_LRW(addr) do { \
+ *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \
+ *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \
+} while (0)
+
+#define SET_JSR32_R26(addr) do { \
+ *(uint16_t *)(addr) = 0xE8Fa; \
+ *((uint16_t *)(addr) + 1) = 0x0000; \
+} while (0)
+
+static void jsri_2_lrw_jsr(uint32_t *location)
+{
+ uint16_t *location_tmp = (uint16_t *)location;
+
+ if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
+ return;
+
+ if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
+ /* jsri 0x... --> lrw r26, 0x... */
+ CHANGE_JSRI_TO_LRW(location);
+ /* lsli r0, r0 --> jsr r26 */
+ SET_JSR32_R26(location + 1);
+ }
+}
+#else
+static inline void jsri_2_lrw_jsr(uint32_t *location)
+{
+ return;
+}
+#endif
+
+int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec, struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ short *temp;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_CSKY_32:
+ /* We add the value into the location given */
+ *location = rel[i].r_addend + sym->st_value;
+ break;
+ case R_CSKY_PC32:
+ /* Add the value, subtract its position */
+ *location = rel[i].r_addend + sym->st_value
+ - (uint32_t)location;
+ break;
+ case R_CSKY_PCRELJSR_IMM11BY2:
+ break;
+ case R_CSKY_PCRELJSR_IMM26BY2:
+ jsri_2_lrw_jsr(location);
+ break;
+ case R_CSKY_ADDR_HI16:
+ temp = ((short *)location) + 1;
+ *temp = (short)
+ ((rel[i].r_addend + sym->st_value) >> 16);
+ break;
+ case R_CSKY_ADDR_LO16:
+ temp = ((short *)location) + 1;
+ *temp = (short)
+ ((rel[i].r_addend + sym->st_value) & 0xffff);
+ break;
+ default:
+ pr_err("module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
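
A quick worked example of the R_CSKY_ADDR_HI16/LO16 cases above, with a hypothetical relocated value:

    uint32_t v = 0x80123456;                /* sym->st_value + r_addend */
    uint16_t hi16 = (v >> 16) & 0xffff;     /* 0x8012 -> movih immediate */
    uint16_t lo16 = v & 0xffff;             /* 0x3456 -> ori immediate */
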
diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
new file mode 100644
index 000000000..1612f4354
--- /dev/null
+++ b/arch/csky/kernel/perf_callchain.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+/* Kernel callchain */
+struct stackframe {
+ unsigned long fp;
+ unsigned long lr;
+};
+
+static int unwind_frame_kernel(struct stackframe *frame)
+{
+ unsigned long low = (unsigned long)task_stack_page(current);
+ unsigned long high = low + THREAD_SIZE;
+
+ if (unlikely(frame->fp < low || frame->fp > high))
+ return -EPERM;
+
+ if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
+ return -EPERM;
+
+ *frame = *(struct stackframe *)frame->fp;
+
+ if (__kernel_text_address(frame->lr)) {
+ int graph = 0;
+
+ frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
+ NULL);
+ }
+ return 0;
+}
+
+static void notrace walk_stackframe(struct stackframe *fr,
+ struct perf_callchain_entry_ctx *entry)
+{
+ do {
+ perf_callchain_store(entry, fr->lr);
+ } while (unwind_frame_kernel(fr) >= 0);
+}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ unsigned long fp, unsigned long reg_lr)
+{
+ struct stackframe buftail;
+ unsigned long lr = 0;
+ unsigned long __user *user_frame_tail = (unsigned long __user *)fp;
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ if (reg_lr != 0)
+ lr = reg_lr;
+ else
+ lr = buftail.lr;
+
+ fp = buftail.fp;
+ perf_callchain_store(entry, lr);
+
+ return fp;
+}
+
+/*
+ * This is called when the target is in user mode, and only when
+ * PERF_SAMPLE_CALLCHAIN is requested; see
+ * kernel/events/core.c:perf_prepare_sample().
+ *
+ * How to trigger perf_callchain_[user/kernel]:
+ * $ perf record -e cpu-clock --call-graph fp ./program
+ * $ perf report --call-graph
+ *
+ * On the C-SKY platform, the program being sampled and the C library
+ * must be compiled with -mbacktrace, otherwise the user stack will
+ * not contain function frames.
+ */
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->regs[4];
+ perf_callchain_store(entry, regs->pc);
+
+	/*
+	 * When backtracing from a leaf function, lr is normally not
+	 * saved in the frame on C-SKY, so take lr from pt_regs at the
+	 * sample point. Note that the lr value can be incorrect if lr
+	 * was being used as a temporary register.
+	 */
+ fp = user_backtrace(entry, fp, regs->lr);
+
+ while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+ fp = user_backtrace(entry, fp, 0);
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct stackframe fr;
+
+ fr.fp = regs->regs[4];
+ fr.lr = regs->lr;
+ walk_stackframe(&fr, entry);
+}
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
new file mode 100644
index 000000000..e5f18420c
--- /dev/null
+++ b/arch/csky/kernel/perf_event.c
@@ -0,0 +1,1371 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#define CSKY_PMU_MAX_EVENTS 32
+#define DEFAULT_COUNT_WIDTH 48
+
+#define HPCR "<0, 0x0>" /* PMU Control reg */
+#define HPSPR "<0, 0x1>" /* Start PC reg */
+#define HPEPR "<0, 0x2>" /* End PC reg */
+#define HPSIR "<0, 0x3>" /* Soft Counter reg */
+#define HPCNTENR "<0, 0x4>" /* Count Enable reg */
+#define HPINTENR "<0, 0x5>" /* Interrupt Enable reg */
+#define HPOFSR "<0, 0x6>" /* Interrupt Status reg */
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *events[CSKY_PMU_MAX_EVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)];
+};
+
+static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void);
+static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val);
+
+static struct csky_pmu_t {
+ struct pmu pmu;
+ struct pmu_hw_events __percpu *hw_events;
+ struct platform_device *plat_device;
+ uint32_t count_width;
+ uint32_t hpcr;
+ u64 max_period;
+} csky_pmu;
+static int csky_pmu_irq;
+
+#define to_csky_pmu(p)  (container_of(p, struct csky_pmu_t, pmu))
+
+#define cprgr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile("cprgr %0, "reg"\n" \
+ : "=r"(tmp) \
+ : \
+ : "memory"); \
+ tmp; \
+})
+
+#define cpwgr(reg, val) \
+({ \
+ asm volatile( \
+ "cpwgr %0, "reg"\n" \
+ : \
+ : "r"(val) \
+ : "memory"); \
+})
+
+#define cprcr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile("cprcr %0, "reg"\n" \
+ : "=r"(tmp) \
+ : \
+ : "memory"); \
+ tmp; \
+})
+
+#define cpwcr(reg, val) \
+({ \
+ asm volatile( \
+ "cpwcr %0, "reg"\n" \
+ : \
+ : "r"(val) \
+ : "memory"); \
+})
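
Example use of these accessors, mirroring how the code below manipulates the registers named at the top of the file:

    /* Enable the interrupt and the counter for index 2, as
     * csky_pmu_start() does for a live event. */
    cpwcr(HPINTENR, BIT(2) | cprcr(HPINTENR));
    cpwcr(HPCNTENR, BIT(2) | cprcr(HPCNTENR));
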
+
+/* cycle counter */
+static uint64_t csky_pmu_read_cc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x3>");
+ lo = cprgr("<0, 0x2>");
+ hi = cprgr("<0, 0x3>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
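
Every 64-bit counter reader below repeats this hi/lo/hi pattern. Its generic form is a torn-read guard: re-read the high half until it is stable across the low-half read, so a carry between the two 32-bit reads cannot be missed. A sketch with the cprgr() pair abstracted out:

    static uint64_t read_split64(uint32_t (*rd_lo)(void),
                                 uint32_t (*rd_hi)(void))
    {
            uint32_t lo, hi, tmp;

            do {
                    tmp = rd_hi();          /* high half, first sample */
                    lo = rd_lo();           /* low half */
                    hi = rd_hi();           /* high half, second sample */
            } while (hi != tmp);            /* retry if a carry slipped in */

            return ((uint64_t)hi << 32) | lo;
    }
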
+
+static void csky_pmu_write_cc(uint64_t val)
+{
+ cpwgr("<0, 0x2>", (uint32_t) val);
+ cpwgr("<0, 0x3>", (uint32_t) (val >> 32));
+}
+
+/* instruction counter */
+static uint64_t csky_pmu_read_ic(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x5>");
+ lo = cprgr("<0, 0x4>");
+ hi = cprgr("<0, 0x5>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_ic(uint64_t val)
+{
+ cpwgr("<0, 0x4>", (uint32_t) val);
+ cpwgr("<0, 0x5>", (uint32_t) (val >> 32));
+}
+
+/* l1 icache access counter */
+static uint64_t csky_pmu_read_icac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x7>");
+ lo = cprgr("<0, 0x6>");
+ hi = cprgr("<0, 0x7>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_icac(uint64_t val)
+{
+ cpwgr("<0, 0x6>", (uint32_t) val);
+ cpwgr("<0, 0x7>", (uint32_t) (val >> 32));
+}
+
+/* l1 icache miss counter */
+static uint64_t csky_pmu_read_icmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x9>");
+ lo = cprgr("<0, 0x8>");
+ hi = cprgr("<0, 0x9>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_icmc(uint64_t val)
+{
+ cpwgr("<0, 0x8>", (uint32_t) val);
+ cpwgr("<0, 0x9>", (uint32_t) (val >> 32));
+}
+
+/* l1 dcache access counter */
+static uint64_t csky_pmu_read_dcac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0xb>");
+ lo = cprgr("<0, 0xa>");
+ hi = cprgr("<0, 0xb>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcac(uint64_t val)
+{
+ cpwgr("<0, 0xa>", (uint32_t) val);
+ cpwgr("<0, 0xb>", (uint32_t) (val >> 32));
+}
+
+/* l1 dcache miss counter */
+static uint64_t csky_pmu_read_dcmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0xd>");
+ lo = cprgr("<0, 0xc>");
+ hi = cprgr("<0, 0xd>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcmc(uint64_t val)
+{
+ cpwgr("<0, 0xc>", (uint32_t) val);
+ cpwgr("<0, 0xd>", (uint32_t) (val >> 32));
+}
+
+/* l2 cache access counter */
+static uint64_t csky_pmu_read_l2ac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0xf>");
+ lo = cprgr("<0, 0xe>");
+ hi = cprgr("<0, 0xf>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2ac(uint64_t val)
+{
+ cpwgr("<0, 0xe>", (uint32_t) val);
+ cpwgr("<0, 0xf>", (uint32_t) (val >> 32));
+}
+
+/* l2 cache miss counter */
+static uint64_t csky_pmu_read_l2mc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x11>");
+ lo = cprgr("<0, 0x10>");
+ hi = cprgr("<0, 0x11>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2mc(uint64_t val)
+{
+ cpwgr("<0, 0x10>", (uint32_t) val);
+ cpwgr("<0, 0x11>", (uint32_t) (val >> 32));
+}
+
+/* I-UTLB miss counter */
+static uint64_t csky_pmu_read_iutlbmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x15>");
+ lo = cprgr("<0, 0x14>");
+ hi = cprgr("<0, 0x15>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_iutlbmc(uint64_t val)
+{
+ cpwgr("<0, 0x14>", (uint32_t) val);
+ cpwgr("<0, 0x15>", (uint32_t) (val >> 32));
+}
+
+/* D-UTLB miss counter */
+static uint64_t csky_pmu_read_dutlbmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x17>");
+ lo = cprgr("<0, 0x16>");
+ hi = cprgr("<0, 0x17>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dutlbmc(uint64_t val)
+{
+ cpwgr("<0, 0x16>", (uint32_t) val);
+ cpwgr("<0, 0x17>", (uint32_t) (val >> 32));
+}
+
+/* JTLB miss counter */
+static uint64_t csky_pmu_read_jtlbmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x19>");
+ lo = cprgr("<0, 0x18>");
+ hi = cprgr("<0, 0x19>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_jtlbmc(uint64_t val)
+{
+ cpwgr("<0, 0x18>", (uint32_t) val);
+ cpwgr("<0, 0x19>", (uint32_t) (val >> 32));
+}
+
+/* software counter */
+static uint64_t csky_pmu_read_softc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x1b>");
+ lo = cprgr("<0, 0x1a>");
+ hi = cprgr("<0, 0x1b>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_softc(uint64_t val)
+{
+ cpwgr("<0, 0x1a>", (uint32_t) val);
+ cpwgr("<0, 0x1b>", (uint32_t) (val >> 32));
+}
+
+/* conditional branch mispredict counter */
+static uint64_t csky_pmu_read_cbmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x1d>");
+ lo = cprgr("<0, 0x1c>");
+ hi = cprgr("<0, 0x1d>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_cbmc(uint64_t val)
+{
+ cpwgr("<0, 0x1c>", (uint32_t) val);
+ cpwgr("<0, 0x1d>", (uint32_t) (val >> 32));
+}
+
+/* conditional branch instruction counter */
+static uint64_t csky_pmu_read_cbic(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x1f>");
+ lo = cprgr("<0, 0x1e>");
+ hi = cprgr("<0, 0x1f>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_cbic(uint64_t val)
+{
+ cpwgr("<0, 0x1e>", (uint32_t) val);
+ cpwgr("<0, 0x1f>", (uint32_t) (val >> 32));
+}
+
+/* indirect branch mispredict counter */
+static uint64_t csky_pmu_read_ibmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x21>");
+ lo = cprgr("<0, 0x20>");
+ hi = cprgr("<0, 0x21>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_ibmc(uint64_t val)
+{
+ cpwgr("<0, 0x20>", (uint32_t) val);
+ cpwgr("<0, 0x21>", (uint32_t) (val >> 32));
+}
+
+/* indirect branch instruction counter */
+static uint64_t csky_pmu_read_ibic(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x23>");
+ lo = cprgr("<0, 0x22>");
+ hi = cprgr("<0, 0x23>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_ibic(uint64_t val)
+{
+ cpwgr("<0, 0x22>", (uint32_t) val);
+ cpwgr("<0, 0x23>", (uint32_t) (val >> 32));
+}
+
+/* LSU spec fail counter */
+static uint64_t csky_pmu_read_lsfc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x25>");
+ lo = cprgr("<0, 0x24>");
+ hi = cprgr("<0, 0x25>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_lsfc(uint64_t val)
+{
+ cpwgr("<0, 0x24>", (uint32_t) val);
+ cpwgr("<0, 0x25>", (uint32_t) (val >> 32));
+}
+
+/* store instruction counter */
+static uint64_t csky_pmu_read_sic(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x27>");
+ lo = cprgr("<0, 0x26>");
+ hi = cprgr("<0, 0x27>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_sic(uint64_t val)
+{
+ cpwgr("<0, 0x26>", (uint32_t) val);
+ cpwgr("<0, 0x27>", (uint32_t) (val >> 32));
+}
+
+/* dcache read access counter */
+static uint64_t csky_pmu_read_dcrac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x29>");
+ lo = cprgr("<0, 0x28>");
+ hi = cprgr("<0, 0x29>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcrac(uint64_t val)
+{
+ cpwgr("<0, 0x28>", (uint32_t) val);
+ cpwgr("<0, 0x29>", (uint32_t) (val >> 32));
+}
+
+/* dcache read miss counter */
+static uint64_t csky_pmu_read_dcrmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x2b>");
+ lo = cprgr("<0, 0x2a>");
+ hi = cprgr("<0, 0x2b>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcrmc(uint64_t val)
+{
+ cpwgr("<0, 0x2a>", (uint32_t) val);
+ cpwgr("<0, 0x2b>", (uint32_t) (val >> 32));
+}
+
+/* dcache write access counter */
+static uint64_t csky_pmu_read_dcwac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x2d>");
+ lo = cprgr("<0, 0x2c>");
+ hi = cprgr("<0, 0x2d>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcwac(uint64_t val)
+{
+ cpwgr("<0, 0x2c>", (uint32_t) val);
+ cpwgr("<0, 0x2d>", (uint32_t) (val >> 32));
+}
+
+/* dcache write miss counter */
+static uint64_t csky_pmu_read_dcwmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x2f>");
+ lo = cprgr("<0, 0x2e>");
+ hi = cprgr("<0, 0x2f>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_dcwmc(uint64_t val)
+{
+ cpwgr("<0, 0x2e>", (uint32_t) val);
+ cpwgr("<0, 0x2f>", (uint32_t) (val >> 32));
+}
+
+/* l2cache read access counter */
+static uint64_t csky_pmu_read_l2rac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x31>");
+ lo = cprgr("<0, 0x30>");
+ hi = cprgr("<0, 0x31>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2rac(uint64_t val)
+{
+ cpwgr("<0, 0x30>", (uint32_t) val);
+ cpwgr("<0, 0x31>", (uint32_t) (val >> 32));
+}
+
+/* l2cache read miss counter */
+static uint64_t csky_pmu_read_l2rmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x33>");
+ lo = cprgr("<0, 0x32>");
+ hi = cprgr("<0, 0x33>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2rmc(uint64_t val)
+{
+ cpwgr("<0, 0x32>", (uint32_t) val);
+ cpwgr("<0, 0x33>", (uint32_t) (val >> 32));
+}
+
+/* l2cache write access counter */
+static uint64_t csky_pmu_read_l2wac(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x35>");
+ lo = cprgr("<0, 0x34>");
+ hi = cprgr("<0, 0x35>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2wac(uint64_t val)
+{
+ cpwgr("<0, 0x34>", (uint32_t) val);
+ cpwgr("<0, 0x35>", (uint32_t) (val >> 32));
+}
+
+/* l2cache write miss counter */
+static uint64_t csky_pmu_read_l2wmc(void)
+{
+ uint32_t lo, hi, tmp;
+ uint64_t result;
+
+ do {
+ tmp = cprgr("<0, 0x37>");
+ lo = cprgr("<0, 0x36>");
+ hi = cprgr("<0, 0x37>");
+ } while (hi != tmp);
+
+ result = (uint64_t) (hi) << 32;
+ result |= lo;
+
+ return result;
+}
+
+static void csky_pmu_write_l2wmc(uint64_t val)
+{
+ cpwgr("<0, 0x36>", (uint32_t) val);
+ cpwgr("<0, 0x37>", (uint32_t) (val >> 32));
+}
+
+#define HW_OP_UNSUPPORTED 0xffff
+static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x1,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x2,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0xe,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xffff
+static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+#ifdef CONFIG_CPU_CK810
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x5,
+ [C(RESULT_MISS)] = 0x6,
+ },
+#else
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x14,
+ [C(RESULT_MISS)] = 0x15,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x16,
+ [C(RESULT_MISS)] = 0x17,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+#endif
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x3,
+ [C(RESULT_MISS)] = 0x4,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+#ifdef CONFIG_CPU_CK810
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x7,
+ [C(RESULT_MISS)] = 0x8,
+ },
+#else
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x18,
+ [C(RESULT_MISS)] = 0x19,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x1a,
+ [C(RESULT_MISS)] = 0x1b,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+#endif
+ },
+ [C(DTLB)] = {
+#ifdef CONFIG_CPU_CK810
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+#else
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x14,
+ [C(RESULT_MISS)] = 0xb,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x16,
+ [C(RESULT_MISS)] = 0xb,
+ },
+#endif
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+#ifdef CONFIG_CPU_CK810
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+#else
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x3,
+ [C(RESULT_MISS)] = 0xa,
+ },
+#endif
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+int csky_pmu_event_set_period(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)csky_pmu.max_period)
+ left = csky_pmu.max_period;
+
+ /*
+ * The hw event starts counting from this event offset,
+ * mark it to be able to extract future "deltas":
+ */
+ local64_set(&hwc->prev_count, (u64)(-left));
+
+ if (hw_raw_write_mapping[hwc->idx] != NULL)
+ hw_raw_write_mapping[hwc->idx]((u64)(-left) &
+ csky_pmu.max_period);
+
+ cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR));
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
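
A worked example of the (u64)(-left) programming above, assuming the default 48-bit count width: writing the counter with the two's complement of the remaining period makes it overflow after exactly `left` events.

    u64 max_period = (1ULL << 48) - 1;      /* assumed DEFAULT_COUNT_WIDTH */
    s64 left = 1000000;
    u64 start = (u64)(-left) & max_period;  /* 0xfffffff0bdc0 */
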
+
+static void csky_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc)
+{
+ uint64_t prev_raw_count = local64_read(&hwc->prev_count);
+ /*
+ * Sign extend count value to 64bit, otherwise delta calculation
+ * would be incorrect when overflow occurs.
+ */
+ uint64_t new_raw_count = sign_extend64(
+ hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1);
+ int64_t delta = new_raw_count - prev_raw_count;
+
+ /*
+ * We aren't afraid of hwc->prev_count changing beneath our feet
+ * because there's no way for us to re-enter this function anytime.
+ */
+ local64_set(&hwc->prev_count, new_raw_count);
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
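
A worked example of why the sign extension matters, again assuming a 48-bit counter (bit index 47): a delta computed across the wrap point stays small instead of jumping by about 2^48.

    u64 prev = sign_extend64(0xfffffffffff0ULL, 47); /* extends to -16 */
    u64 next = sign_extend64(0x000000000010ULL, 47); /* 16 */
    s64 delta = next - prev;                         /* 32, as expected */
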
+
+static void csky_pmu_reset(void *info)
+{
+ cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1));
+}
+
+static void csky_pmu_read(struct perf_event *event)
+{
+ csky_perf_event_update(event, &event->hw);
+}
+
+static int csky_pmu_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+
+ cache_type = (config >> 0) & 0xff;
+ cache_op = (config >> 8) & 0xff;
+ cache_result = (config >> 16) & 0xff;
+
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ return csky_pmu_cache_map[cache_type][cache_op][cache_result];
+}
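
For example, the attr.config value for "L1D read miss" under PERF_TYPE_HW_CACHE is assembled with the same field layout this function decodes:

    u64 config = PERF_COUNT_HW_CACHE_L1D |
                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
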
+
+static int csky_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int ret;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -ENOENT;
+ ret = csky_pmu_hw_map[event->attr.config];
+ if (ret == HW_OP_UNSUPPORTED)
+ return -ENOENT;
+ hwc->idx = ret;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ ret = csky_pmu_cache_event(event->attr.config);
+ if (ret == CACHE_OP_UNSUPPORTED)
+ return -ENOENT;
+ hwc->idx = ret;
+ break;
+ case PERF_TYPE_RAW:
+ if (hw_raw_read_mapping[event->attr.config] == NULL)
+ return -ENOENT;
+ hwc->idx = event->attr.config;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (event->attr.exclude_user)
+ csky_pmu.hpcr = BIT(2);
+ else if (event->attr.exclude_kernel)
+ csky_pmu.hpcr = BIT(3);
+ else
+ csky_pmu.hpcr = BIT(2) | BIT(3);
+
+ csky_pmu.hpcr |= BIT(1) | BIT(0);
+
+ return 0;
+}
+
+/* starts all counters */
+static void csky_pmu_enable(struct pmu *pmu)
+{
+ cpwcr(HPCR, csky_pmu.hpcr);
+}
+
+/* stops all counters */
+static void csky_pmu_disable(struct pmu *pmu)
+{
+ cpwcr(HPCR, BIT(1));
+}
+
+static void csky_pmu_start(struct perf_event *event, int flags)
+{
+ unsigned long flg;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ csky_pmu_event_set_period(event);
+
+ local_irq_save(flg);
+
+ cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR));
+ cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR));
+
+ local_irq_restore(flg);
+}
+
+static void csky_pmu_stop_event(struct perf_event *event)
+{
+ unsigned long flg;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ local_irq_save(flg);
+
+ cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR));
+ cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR));
+
+ local_irq_restore(flg);
+}
+
+static void csky_pmu_stop(struct perf_event *event, int flags)
+{
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ csky_pmu_stop_event(event);
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) &&
+ !(event->hw.state & PERF_HES_UPTODATE)) {
+ csky_perf_event_update(event, &event->hw);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void csky_pmu_del(struct perf_event *event, int flags)
+{
+ struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ csky_pmu_stop(event, PERF_EF_UPDATE);
+
+ hw_events->events[hwc->idx] = NULL;
+
+ perf_event_update_userpage(event);
+}
+
+/* allocate hardware counter and optionally start counting */
+static int csky_pmu_add(struct perf_event *event, int flags)
+{
+ struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hw_events->events[hwc->idx] = event;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ csky_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev)
+{
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!cprcr(HPOFSR))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ csky_pmu_disable(&csky_pmu.pmu);
+
+ for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!(cprcr(HPOFSR) & BIT(idx)))
+ continue;
+
+ hwc = &event->hw;
+ csky_perf_event_update(event, &event->hw);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ csky_pmu_event_set_period(event);
+
+ if (perf_event_overflow(event, &data, regs))
+ csky_pmu_stop_event(event);
+ }
+
+ csky_pmu_enable(&csky_pmu.pmu);
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
+ * will not work.
+ */
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static int csky_pmu_request_irq(irq_handler_t handler)
+{
+ int err, irqs;
+ struct platform_device *pmu_device = csky_pmu.plat_device;
+
+ if (!pmu_device)
+ return -ENODEV;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ csky_pmu_irq = platform_get_irq(pmu_device, 0);
+ if (csky_pmu_irq < 0)
+ return -ENODEV;
+ err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu",
+ this_cpu_ptr(csky_pmu.hw_events));
+ if (err) {
+ pr_err("unable to request IRQ%d for CSKY PMU counters\n",
+ csky_pmu_irq);
+ return err;
+ }
+
+ return 0;
+}
+
+static void csky_pmu_free_irq(void)
+{
+ int irq;
+ struct platform_device *pmu_device = csky_pmu.plat_device;
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq >= 0)
+ free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events));
+}
+
+int init_hw_perf_events(void)
+{
+ csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
+ GFP_KERNEL);
+ if (!csky_pmu.hw_events) {
+ pr_info("failed to allocate per-cpu PMU data.\n");
+ return -ENOMEM;
+ }
+
+ csky_pmu.pmu = (struct pmu) {
+ .pmu_enable = csky_pmu_enable,
+ .pmu_disable = csky_pmu_disable,
+ .event_init = csky_pmu_event_init,
+ .add = csky_pmu_add,
+ .del = csky_pmu_del,
+ .start = csky_pmu_start,
+ .stop = csky_pmu_stop,
+ .read = csky_pmu_read,
+ };
+
+	memset((void *)hw_raw_read_mapping, 0,
+	       sizeof(hw_raw_read_mapping));
+
+ hw_raw_read_mapping[0x1] = csky_pmu_read_cc;
+ hw_raw_read_mapping[0x2] = csky_pmu_read_ic;
+ hw_raw_read_mapping[0x3] = csky_pmu_read_icac;
+ hw_raw_read_mapping[0x4] = csky_pmu_read_icmc;
+ hw_raw_read_mapping[0x5] = csky_pmu_read_dcac;
+ hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc;
+ hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac;
+ hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc;
+ hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc;
+ hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc;
+ hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc;
+ hw_raw_read_mapping[0xd] = csky_pmu_read_softc;
+ hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc;
+ hw_raw_read_mapping[0xf] = csky_pmu_read_cbic;
+ hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc;
+ hw_raw_read_mapping[0x11] = csky_pmu_read_ibic;
+ hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc;
+ hw_raw_read_mapping[0x13] = csky_pmu_read_sic;
+ hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac;
+ hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc;
+ hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac;
+ hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc;
+ hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac;
+ hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc;
+ hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac;
+ hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc;
+
+	memset((void *)hw_raw_write_mapping, 0,
+	       sizeof(hw_raw_write_mapping));
+
+ hw_raw_write_mapping[0x1] = csky_pmu_write_cc;
+ hw_raw_write_mapping[0x2] = csky_pmu_write_ic;
+ hw_raw_write_mapping[0x3] = csky_pmu_write_icac;
+ hw_raw_write_mapping[0x4] = csky_pmu_write_icmc;
+ hw_raw_write_mapping[0x5] = csky_pmu_write_dcac;
+ hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc;
+ hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac;
+ hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc;
+ hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc;
+ hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc;
+ hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc;
+ hw_raw_write_mapping[0xd] = csky_pmu_write_softc;
+ hw_raw_write_mapping[0xe] = csky_pmu_write_cbmc;
+ hw_raw_write_mapping[0xf] = csky_pmu_write_cbic;
+ hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc;
+ hw_raw_write_mapping[0x11] = csky_pmu_write_ibic;
+ hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc;
+ hw_raw_write_mapping[0x13] = csky_pmu_write_sic;
+ hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac;
+ hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc;
+ hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac;
+ hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc;
+ hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac;
+ hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc;
+ hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac;
+ hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc;
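+
+	/*
+	 * Illustrative example: raw event 0x5 above selects the data cache
+	 * access counter, so once this PMU is registered it can be counted
+	 * from userspace with something like "perf stat -e r0x5 -- <workload>".
+	 */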
+
+ return 0;
+}
+
+static int csky_pmu_starting_cpu(unsigned int cpu)
+{
+ enable_percpu_irq(csky_pmu_irq, 0);
+ return 0;
+}
+
+static int csky_pmu_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(csky_pmu_irq);
+ return 0;
+}
+
+int csky_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ ret = init_hw_perf_events();
+ if (ret) {
+ pr_notice("[perf] failed to probe PMU!\n");
+ return ret;
+ }
+
+ if (of_property_read_u32(node, "count-width",
+ &csky_pmu.count_width)) {
+ csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
+ }
+ csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;
+
+ csky_pmu.plat_device = pdev;
+
+ /* Ensure the PMU has sane values out of reset. */
+ on_each_cpu(csky_pmu_reset, &csky_pmu, 1);
+
+ ret = csky_pmu_request_irq(csky_pmu_handle_irq);
+ if (ret) {
+ csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pr_notice("[perf] PMU request irq fail!\n");
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE",
+ csky_pmu_starting_cpu,
+ csky_pmu_dying_cpu);
+ if (ret) {
+ csky_pmu_free_irq();
+ free_percpu(csky_pmu.hw_events);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
+ if (ret) {
+ csky_pmu_free_irq();
+ free_percpu(csky_pmu.hw_events);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id csky_pmu_of_device_ids[] = {
+ {.compatible = "csky,csky-pmu"},
+ {},
+};
+
+static int csky_pmu_dev_probe(struct platform_device *pdev)
+{
+ return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids);
+}
+
+static struct platform_driver csky_pmu_driver = {
+ .driver = {
+ .name = "csky-pmu",
+ .of_match_table = csky_pmu_of_device_ids,
+ },
+ .probe = csky_pmu_dev_probe,
+};
+
+static int __init csky_pmu_probe(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&csky_pmu_driver);
+ if (ret)
+ pr_notice("[perf] PMU initialization failed\n");
+ else
+ pr_notice("[perf] PMU initialization done\n");
+
+ return ret;
+}
+
+device_initcall(csky_pmu_probe);
diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c
new file mode 100644
index 000000000..09b7f88a2
--- /dev/null
+++ b/arch/csky/kernel/perf_regs.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX))
+ return 0;
+
+ return (u64)*((u32 *)regs + idx);
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
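+
+/*
+ * For example, a sample_regs_user mask of BIT(0) | BIT(1) passes the
+ * check above and selects the first two u32 slots of pt_regs, while any
+ * bit at or above PERF_REG_CSKY_MAX falls in REG_RESERVED and is
+ * rejected with -EINVAL.
+ */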
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/csky/kernel/power.c b/arch/csky/kernel/power.c
new file mode 100644
index 000000000..86ee20290
--- /dev/null
+++ b/arch/csky/kernel/power.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/reboot.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_power_off(void)
+{
+ local_irq_disable();
+ do_kernel_power_off();
+ asm volatile ("bkpt");
+}
+
+void machine_halt(void)
+{
+ local_irq_disable();
+ do_kernel_power_off();
+ asm volatile ("bkpt");
+}
+
+void machine_restart(char *cmd)
+{
+ local_irq_disable();
+ do_kernel_restart(cmd);
+ asm volatile ("bkpt");
+}
diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile
new file mode 100644
index 000000000..1c7c6e6cb
--- /dev/null
+++ b/arch/csky/kernel/probes/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
+obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
+
+CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c
new file mode 100644
index 000000000..bbc4edc25
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+/* Return:
+ * INSN_REJECTED	if the instruction is not allowed to be kprobed,
+ * INSN_GOOD_NO_SLOT	if the instruction is simulated and doesn't use a slot,
+ * INSN_GOOD		if the instruction can be single-stepped from a slot.
+ */
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+ probe_opcode_t insn = le32_to_cpu(*addr);
+
+ CSKY_INSN_SET_SIMULATE(br16, insn);
+ CSKY_INSN_SET_SIMULATE(bt16, insn);
+ CSKY_INSN_SET_SIMULATE(bf16, insn);
+ CSKY_INSN_SET_SIMULATE(jmp16, insn);
+ CSKY_INSN_SET_SIMULATE(jsr16, insn);
+ CSKY_INSN_SET_SIMULATE(lrw16, insn);
+ CSKY_INSN_SET_SIMULATE(pop16, insn);
+
+ CSKY_INSN_SET_SIMULATE(br32, insn);
+ CSKY_INSN_SET_SIMULATE(bt32, insn);
+ CSKY_INSN_SET_SIMULATE(bf32, insn);
+ CSKY_INSN_SET_SIMULATE(jmp32, insn);
+ CSKY_INSN_SET_SIMULATE(jsr32, insn);
+ CSKY_INSN_SET_SIMULATE(lrw32, insn);
+ CSKY_INSN_SET_SIMULATE(pop32, insn);
+
+ CSKY_INSN_SET_SIMULATE(bez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnezad32, insn);
+ CSKY_INSN_SET_SIMULATE(bhsz32, insn);
+ CSKY_INSN_SET_SIMULATE(bhz32, insn);
+ CSKY_INSN_SET_SIMULATE(blsz32, insn);
+ CSKY_INSN_SET_SIMULATE(blz32, insn);
+ CSKY_INSN_SET_SIMULATE(bsr32, insn);
+ CSKY_INSN_SET_SIMULATE(jmpi32, insn);
+ CSKY_INSN_SET_SIMULATE(jsri32, insn);
+
+ return INSN_GOOD;
+}
diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h
new file mode 100644
index 000000000..9c4ad48fe
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+
+#include <asm/sections.h>
+#include <asm/kprobes.h>
+
+enum probe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
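+/*
+ * A C-SKY V2 instruction is 32 bits wide when both top bits of its
+ * first halfword are set, which is what the mask below tests.
+ */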
+#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
+
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
new file mode 100644
index 000000000..834cffcfb
--- /dev/null
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ int bit;
+ bool lr_saver = false;
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ regs = ftrace_get_regs(fregs);
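+	/*
+	 * The kprobe may sit on the mcount call site itself or on the
+	 * lr-saving instruction one MCOUNT_INSN_SIZE before it, so look up
+	 * both addresses.
+	 */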
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (!p) {
+ p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+ lr_saver = true;
+ }
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+
+ if (lr_saver)
+ ip -= MCOUNT_INSN_SIZE;
+ instruction_pointer_set(regs, ip);
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->pc)
+ * as if there is a nop
+ */
+ instruction_pointer_set(regs,
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+		/*
+		 * If the pre_handler returned non-zero, it changed regs->pc,
+		 * so we must skip emulating the post_handler.
+		 */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+out:
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.api.insn = NULL;
+ return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
new file mode 100644
index 000000000..3c6e5c725
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#define pr_fmt(fmt) "kprobes: " fmt
+
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+struct csky_insn_patch {
+ kprobe_opcode_t *addr;
+ u32 opcode;
+ atomic_t cpu_count;
+};
+
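+/*
+ * Runs on every online CPU via stop_machine(): the last CPU to arrive
+ * writes the new halfword and writes the dcache back while the others
+ * spin, then every CPU invalidates its icache for the patched range.
+ */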
+static int __kprobes patch_text_cb(void *priv)
+{
+ struct csky_insn_patch *param = priv;
+ unsigned int addr = (unsigned int)param->addr;
+
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ *(u16 *) addr = cpu_to_le16(param->opcode);
+ dcache_wb_range(addr, addr + 2);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ }
+
+ icache_inv_range(addr, addr + 2);
+
+ return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
+
+ return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
+}
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+ patch_text(p->ainsn.api.insn, p->opcode);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
+
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+
+ if (probe_addr & 0x1)
+ return -EILSEQ;
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ /* decode instruction */
+ switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.api.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.api.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+/* install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, USR_BKPT);
+}
+
+/* remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * re-enabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, there is a window
+ * between the exception return and the start of the out-of-line single
+ * step in which an interrupt could arrive, and we would then wrongly
+ * single-step into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_sr = regs->sr;
+ regs->sr &= ~BIT(6);
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->sr = kcb->saved_sr;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
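+/*
+ * PSR[15:14] hold the trace mode: 0 selects normal execution and
+ * bit 14 selects single-instruction stepping.
+ */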
+#define TRACE_MODE_SI BIT(14)
+#define TRACE_MODE_MASK ~(0x3 << 14)
+#define TRACE_MODE_RUN 0
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.api.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.api.insn;
+
+ set_ss_context(kcb, slot, p); /* mark pending ss */
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Failed to recover from reentered kprobes.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+	/* restore the return address if this was a non-branching insn */
+ if (cur->ainsn.api.restore != 0)
+ regs->pc = cur->ainsn.api.restore;
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+		/*
+		 * The post_handler can itself hit a breakpoint and be
+		 * single-stepped again, so recursive exceptions must be
+		 * handled.
+		 */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the ip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->pc = (unsigned long) cur->addr;
+ BUG_ON(!instruction_pointer(regs));
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return 1;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it modified
+			 * the execution path, so there is no need to single
+			 * step; just reset the current kprobe and exit.
+			 *
+			 * The pre_handler can itself hit a breakpoint and
+			 * single-step through it before returning.
+			 */
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
+ }
+ return 1;
+ }
+
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+	 * Return to the original instruction and continue.
+ */
+ return 0;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+ clear_ss_context(kcb); /* clear pending ss */
+
+ kprobes_restore_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+ post_kprobe_handler(kcb, regs);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, NULL);
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+ ri->fp = NULL;
+ regs->lr = (unsigned long) &__kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000..ba48ad04a
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#include <linux/linkage.h>
+
+#include <abi/entry.h>
+
+ENTRY(__kretprobe_trampoline)
+ SAVE_REGS_FTRACE
+
+ mov a0, sp /* pt_regs */
+
+ jbsr trampoline_probe_handler
+
+ /* use the result as the return-address */
+ mov lr, a0
+
+ RESTORE_REGS_FTRACE
+ rts
+ENDPROC(__kretprobe_trampoline)
diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000..d6e8d092c
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
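+/*
+ * Map a 5-bit register index from an instruction onto pt_regs: indexes
+ * 0-13 are the slots starting at a0, 14 is usp, 15 is lr, 16-30 live
+ * in exregs[], and 31 is tls.
+ */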
+static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long *ptr)
+{
+ if (index < 14)
+ *ptr = *(&regs->a0 + index);
+
+ if (index > 15 && index < 31)
+ *ptr = *(&regs->exregs[0] + index - 16);
+
+ switch (index) {
+ case 14:
+ *ptr = regs->usp;
+ break;
+ case 15:
+ *ptr = regs->lr;
+ break;
+ case 31:
+ *ptr = regs->tls;
+ break;
+ default:
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long val)
+{
+ if (index < 14)
+ *(&regs->a0 + index) = val;
+
+ if (index > 15 && index < 31)
+ *(&regs->exregs[0] + index - 16) = val;
+
+ switch (index) {
+ case 14:
+ regs->usp = val;
+ break;
+ case 15:
+ regs->lr = val;
+ break;
+ case 31:
+ regs->tls = val;
+ break;
+ default:
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+void __kprobes
+simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+}
+
+void __kprobes
+simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+}
+
+void __kprobes
+simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 2;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long tmp = (opcode & 0x300) >> 3;
+ unsigned long offset = ((opcode & 0x1f) | tmp) << 2;
+
+ tmp = (opcode & 0xe0) >> 5;
+
+ val = *(unsigned int *)(instruction_pointer(regs) + offset);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = (opcode & 0xffff0000) >> 14;
+ unsigned long tmp = opcode & 0x0000001f;
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
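+/*
+ * pop16/pop32: restore the register list encoded in the opcode from
+ * the user stack, advance usp past the popped words, and branch to the
+ * (possibly just-restored) link register.
+ */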
+void __kprobes
+simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < (opcode & 0xf); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x10) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x100000) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
+ csky_insn_reg_set_val(regs, i + 16, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x1000000) {
+ csky_insn_reg_set_val(regs, 29, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp == 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp != 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ long val;
+
+ csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val);
+
+ val -= 1;
+
+ if (val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+
+ csky_insn_reg_set_val(regs, tmp, (unsigned long)val);
+}
+
+void __kprobes
+simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val >= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val <= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long) val < 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+	} else {
+		instruction_pointer_set(regs, addr + 4);
+	}
+}
+
+void __kprobes
+simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp;
+
+ tmp = (opcode & 0xffff) << 16;
+ tmp |= (opcode & 0xffff0000) >> 16;
+
+ instruction_pointer_set(regs,
+ addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
+
+ regs->lr = addr + 4;
+}
+
+void __kprobes
+simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ instruction_pointer_set(regs, val);
+}
+
+void __kprobes
+simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, val);
+}
diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000..ba4cb7ef0
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+
+#define __CSKY_INSN_FUNCS(name, mask, val) \
+static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+void simulate_##name(u32 opcode, long addr, struct pt_regs *regs);
+
+#define CSKY_INSN_SET_SIMULATE(name, code) \
+ do { \
+ if (csky_insn_is_##name(code)) { \
+ api->handler = simulate_##name; \
+ return INSN_GOOD_NO_SLOT; \
+ } \
+ } while (0)
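+
+/*
+ * Each __CSKY_INSN_FUNCS() entry matches (code & mask) == val on the
+ * le32_to_cpu()'d opcode, whose low 16 bits hold the first halfword of
+ * the instruction. For example, csky_insn_is_br32() keys on the 0xe800
+ * opcode halfword while the 16-bit displacement sits in the high half,
+ * which is why the simulators extract it with (opcode & 0xffff0000).
+ */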
+
+__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400)
+__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800)
+__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00)
+__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800)
+__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801)
+__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000)
+__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480)
+
+__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800)
+__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860)
+__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840)
+__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0)
+__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0)
+__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80)
+__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0)
+
+__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900)
+__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920)
+__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820)
+__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0)
+__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940)
+__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960)
+__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980)
+__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000)
+__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0)
+__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0)
+
+#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */
diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c
new file mode 100644
index 000000000..2d31a12e4
--- /dev/null
+++ b/arch/csky/kernel/probes/uprobes.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_TRAP_NR UINT_MAX
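+
+/*
+ * UPROBE_TRAP_NR is a sentinel parked in current->thread.trap_no while
+ * an out-of-line (XOL) single step is in flight; any real trap taken
+ * during the step overwrites it, which arch_uprobe_xol_was_trapped()
+ * checks for.
+ */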
+
+bool is_swbp_insn(uprobe_opcode_t *insn)
+{
+ return (*insn & 0xffff) == UPROBE_SWBP_INSN;
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ auprobe->insn_size = is_insn32(insn) ? 4 : 2;
+
+ switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ utask->autask.saved_trap_no = current->thread.trap_no;
+ current->thread.trap_no = UPROBE_TRAP_NR;
+
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+
+ instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_no != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+ * Task has received a fatal signal, so reset back to probed
+ * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->usp <= ret->stack;
+ else
+ return regs->usp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->lr;
+
+ regs->lr = trampoline_vaddr;
+
+ return ra;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+int uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
+
+int uprobe_single_step_handler(struct pt_regs *regs)
+{
+ if (uprobe_post_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
new file mode 100644
index 000000000..eedddb155
--- /dev/null
+++ b/arch/csky/kernel/process.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/debug.h>
+#include <linux/delay.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+#include <asm/elf.h>
+#include <abi/reg_ops.h>
+
+struct cpuinfo_csky cpu_data[NR_CPUS];
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void) {}
+
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
+ struct switch_stack *childstack;
+ struct pt_regs *childregs = task_pt_regs(p);
+
+#ifdef CONFIG_CPU_HAS_FPU
+ save_to_user_fp(&p->thread.user_fp);
+#endif
+
+ childstack = ((struct switch_stack *) childregs) - 1;
+ memset(childstack, 0, sizeof(struct switch_stack));
+
+ /* setup thread.sp for switch_to !!! */
+ p->thread.sp = (unsigned long)childstack;
+
+ if (unlikely(args->fn)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childstack->r15 = (unsigned long) ret_from_kernel_thread;
+ childstack->r10 = (unsigned long) args->fn_arg;
+ childstack->r9 = (unsigned long) args->fn;
+ childregs->sr = mfcr("psr");
+ } else {
+ *childregs = *(current_pt_regs());
+ if (usp)
+ childregs->usp = usp;
+ if (clone_flags & CLONE_SETTLS)
+ task_thread_info(p)->tp_value = childregs->tls
+ = tls;
+
+ childregs->a0 = 0;
+ childstack->r15 = (unsigned long) ret_from_fork;
+ }
+
+ return 0;
+}
+
+/* Fill in the fpu structure for a core dump. */
+int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
+{
+ memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
+ return 1;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
+{
+ struct pt_regs *regs = task_pt_regs(tsk);
+
+ /* NOTE: usp is error value. */
+ ELF_CORE_COPY_REGS((*pr_regs), regs)
+
+ return 1;
+}
+
+#ifndef CONFIG_CPU_PM_NONE
+void arch_cpu_idle(void)
+{
+#ifdef CONFIG_CPU_PM_WAIT
+ asm volatile("wait\n");
+#endif
+
+#ifdef CONFIG_CPU_PM_DOZE
+ asm volatile("doze\n");
+#endif
+
+#ifdef CONFIG_CPU_PM_STOP
+ asm volatile("stop\n");
+#endif
+ raw_local_irq_enable();
+}
+#endif
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
new file mode 100644
index 000000000..0f7e7b653
--- /dev/null
+++ b/arch/csky/kernel/ptrace.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/audit.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+#include <abi/regdef.h>
+#include <abi/ckmmu.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/* sets the trace bits. */
+#define TRACE_MODE_SI (1 << 14)
+#define TRACE_MODE_RUN 0
+#define TRACE_MODE_MASK ~(0x3 << 14)
+
+/*
+ * Make sure the single step bit is not set.
+ */
+static void singlestep_disable(struct task_struct *tsk)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(tsk);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+ /* Enable irq */
+ regs->sr |= BIT(6);
+}
+
+static void singlestep_enable(struct task_struct *tsk)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(tsk);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+
+ /* Disable irq */
+ regs->sr &= ~BIT(6);
+}
+
+/*
+ * Make sure the single step bit is set.
+ */
+void user_enable_single_step(struct task_struct *child)
+{
+ singlestep_enable(child);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ singlestep_disable(child);
+}
+
+enum csky_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+};
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ /* Abiv1 regs->tls is fake and we need sync here. */
+ regs->tls = task_thread_info(target)->tp_value;
+
+ return membuf_write(&to, regs, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs regs;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+ if (ret)
+ return ret;
+
+ /* BIT(0) of regs.sr is Condition Code/Carry bit */
+ regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
+#ifdef CONFIG_CPU_HAS_HILO
+ regs.dcsr = task_pt_regs(target)->dcsr;
+#endif
+ task_thread_info(target)->tp_value = regs.tls;
+
+ *task_pt_regs(target) = regs;
+
+ return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+ int i;
+ struct user_fp tmp = *regs;
+
+ for (i = 0; i < 16; i++) {
+ tmp.vr[i*4] = regs->vr[i*2];
+ tmp.vr[i*4 + 1] = regs->vr[i*2 + 1];
+ }
+
+ for (i = 0; i < 32; i++)
+ tmp.vr[64 + i] = regs->vr[32 + i];
+
+ return membuf_write(&to, &tmp, sizeof(tmp));
+#else
+ return membuf_write(&to, regs, sizeof(*regs));
+#endif
+}
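+
+/*
+ * On FPUV2 without VDSP, thread.user_fp stores the 16 vector registers
+ * compactly as u32 pairs, while the exported user_fp layout spaces each
+ * of those registers over four u32 slots; fpr_get() above expands one
+ * layout into the other and fpr_set() below performs the inverse
+ * packing.
+ */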
+
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_fp *regs = (struct user_fp *)&target->thread.user_fp;
+
+#if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP)
+ int i;
+ struct user_fp tmp;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1);
+	if (ret)
+		return ret;
+
+	*regs = tmp;
+
+ for (i = 0; i < 16; i++) {
+ regs->vr[i*2] = tmp.vr[i*4];
+ regs->vr[i*2 + 1] = tmp.vr[i*4 + 1];
+ }
+
+ for (i = 0; i < 32; i++)
+ regs->vr[32 + i] = tmp.vr[64 + i];
+#else
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+#endif
+
+ return ret;
+}
+
+static const struct user_regset csky_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct pt_regs) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fp) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_csky_view = {
+ .name = "csky",
+ .e_machine = ELF_ARCH,
+ .regsets = csky_regsets,
+ .n = ARRAY_SIZE(csky_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_csky_view;
+}
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(tls),
+ REG_OFFSET_NAME(lr),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(usp),
+ REG_OFFSET_NAME(orig_a0),
+ REG_OFFSET_NAME(a0),
+ REG_OFFSET_NAME(a1),
+ REG_OFFSET_NAME(a2),
+ REG_OFFSET_NAME(a3),
+ REG_OFFSET_NAME(regs[0]),
+ REG_OFFSET_NAME(regs[1]),
+ REG_OFFSET_NAME(regs[2]),
+ REG_OFFSET_NAME(regs[3]),
+ REG_OFFSET_NAME(regs[4]),
+ REG_OFFSET_NAME(regs[5]),
+ REG_OFFSET_NAME(regs[6]),
+ REG_OFFSET_NAME(regs[7]),
+ REG_OFFSET_NAME(regs[8]),
+ REG_OFFSET_NAME(regs[9]),
+#if defined(__CSKYABIV2__)
+ REG_OFFSET_NAME(exregs[0]),
+ REG_OFFSET_NAME(exregs[1]),
+ REG_OFFSET_NAME(exregs[2]),
+ REG_OFFSET_NAME(exregs[3]),
+ REG_OFFSET_NAME(exregs[4]),
+ REG_OFFSET_NAME(exregs[5]),
+ REG_OFFSET_NAME(exregs[6]),
+ REG_OFFSET_NAME(exregs[7]),
+ REG_OFFSET_NAME(exregs[8]),
+ REG_OFFSET_NAME(exregs[9]),
+ REG_OFFSET_NAME(exregs[10]),
+ REG_OFFSET_NAME(exregs[11]),
+ REG_OFFSET_NAME(exregs[12]),
+ REG_OFFSET_NAME(exregs[13]),
+ REG_OFFSET_NAME(exregs[14]),
+ REG_OFFSET_NAME(rhi),
+ REG_OFFSET_NAME(rlo),
+ REG_OFFSET_NAME(dcsr),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ singlestep_disable(child);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ long ret = -EIO;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (ptrace_report_syscall_entry(regs))
+ return -1;
+
+ if (secure_computing() == -1)
+ return -1;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+ audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3);
+ return 0;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ audit_syscall_exit(regs);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ptrace_report_syscall_exit(regs, 0);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, syscall_get_return_value(current, regs));
+}
+
+#ifdef CONFIG_CPU_CK860
+static void show_iutlb(void)
+{
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x8000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+
+static void show_dutlb(void)
+{
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x4000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+
+static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024];
+static void show_jtlb(void)
+{
+ int entry;
+ unsigned long flags;
+ unsigned long oldpid;
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0;
+
+ local_irq_save(flags);
+ while (entry < 1024) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[entry] = read_mmu_entryhi();
+ entrylo0[entry] = read_mmu_entrylo0();
+ entrylo1[entry] = read_mmu_entrylo1();
+
+ entry++;
+ }
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+
+ for (entry = 0; entry < 1024; entry++)
+ printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ entry, entryhi[entry], entrylo0[entry], entrylo1[entry]);
+ printk("\n\n\n");
+}
+
+static void show_tlb(void)
+{
+ show_iutlb();
+ show_dutlb();
+ show_jtlb();
+}
+#else
+static void show_tlb(void)
+{
+ return;
+}
+#endif
+
+void show_regs(struct pt_regs *fp)
+{
+ pr_info("\nCURRENT PROCESS:\n\n");
+ pr_info("COMM=%s PID=%d\n", current->comm, current->pid);
+
+ if (current->mm) {
+ pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
+ (int) current->mm->start_code,
+ (int) current->mm->end_code,
+ (int) current->mm->start_data,
+ (int) current->mm->end_data,
+ (int) current->mm->end_data,
+ (int) current->mm->brk);
+ pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n",
+ (int) current->mm->start_stack,
+ (int) (((unsigned long) current) + 2 * PAGE_SIZE));
+ }
+
+ pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
+ pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
+ pr_info("SP: 0x%08lx\n", (long)fp->usp);
+ pr_info("PSR: 0x%08lx\n", (long)fp->sr);
+ pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+ pr_info("PT_REGS: 0x%08lx\n", (long)fp);
+
+ pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n",
+ fp->a0, fp->a1, fp->a2, fp->a3);
+#if defined(__CSKYABIV2__)
+ pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n",
+ fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
+ pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n",
+ fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
+ pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n",
+ fp->regs[8], fp->regs[9], fp->lr);
+ pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n",
+ fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]);
+ pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n",
+ fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]);
+ pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n",
+ fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]);
+ pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n",
+ fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls);
+ pr_info(" hi: 0x%08lx lo: 0x%08lx\n",
+ fp->rhi, fp->rlo);
+#else
+ pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n",
+ fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
+ pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n",
+ fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
+ pr_info("r14: 0x%08lx r1: 0x%08lx\n",
+ fp->regs[8], fp->regs[9]);
+#endif
+
+ show_tlb();
+
+ return;
+}
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
new file mode 100644
index 000000000..106fbf0b6
--- /dev/null
+++ b/arch/csky/kernel/setup.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/console.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/start_kernel.h>
+#include <linux/dma-map-ops.h>
+#include <linux/screen_info.h>
+#include <asm/sections.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_DUMMY_CONSOLE
+struct screen_info screen_info = {
+ .orig_video_lines = 30,
+ .orig_video_cols = 80,
+ .orig_video_mode = 0,
+ .orig_video_ega_bx = 0,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 8
+};
+#endif
+
+static void __init csky_memblock_init(void)
+{
+ unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
+ unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+ signed long size;
+
+ memblock_reserve(__pa(_start), _end - _start);
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+ memblock_dump_all();
+
+ min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+ max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ size = max_pfn - min_low_pfn;
+
+ if (size >= lowmem_size) {
+ max_low_pfn = min_low_pfn + lowmem_size;
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
+#endif
+ } else if (size > sseg_size) {
+ max_low_pfn = min_low_pfn + sseg_size;
+ }
+
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+
+ mmu_init(min_low_pfn, max_low_pfn);
+
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
+
+ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+#endif
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
+ dma_contiguous_reserve(0);
+
+ free_area_init(max_zone_pfn);
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ *cmdline_p = boot_command_line;
+
+ console_verbose();
+
+ pr_info("Phys. mem: %ldMB\n",
+ (unsigned long) memblock_phys_mem_size()/1024/1024);
+
+ setup_initial_init_mm(_start, _etext, _edata, _end);
+
+ parse_early_param();
+
+ csky_memblock_init();
+
+ unflatten_and_copy_device_tree();
+
+#ifdef CONFIG_SMP
+ setup_smp();
+#endif
+
+ sparse_init();
+
+ fixaddr_init();
+
+#ifdef CONFIG_HIGHMEM
+ kmap_init();
+#endif
+}
+
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+
+static inline unsigned long read_mmu_msa(void)
+{
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ return read_mmu_msa0();
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+ return read_mmu_msa1();
+#endif
+}
+
+asmlinkage __visible void __init csky_start(unsigned int unused,
+ void *dtb_start)
+{
+ /* Clean up bss section */
+ memset(__bss_start, 0, __bss_stop - __bss_start);
+
+ va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1);
+
+ pre_trap_init();
+
+ if (dtb_start == NULL)
+ early_init_dt_scan(__dtb_start);
+ else
+ early_init_dt_scan(dtb_start);
+
+ start_kernel();
+
+ asm volatile("br .\n");
+}
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
new file mode 100644
index 000000000..b7b368528
--- /dev/null
+++ b/arch/csky/kernel/signal.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/resume_user_mode.h>
+
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#include <abi/regdef.h>
+
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+static int restore_fpu_state(struct sigcontext __user *sc)
+{
+ int err = 0;
+ struct user_fp user_fp;
+
+ err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp));
+
+ restore_from_user_fp(&user_fp);
+
+ return err;
+}
+
+static int save_fpu_state(struct sigcontext __user *sc)
+{
+ struct user_fp user_fp;
+
+ save_to_user_fp(&user_fp);
+
+ return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp));
+}
+#else
+#define restore_fpu_state(sigcontext) (0)
+#define save_fpu_state(sigcontext) (0)
+#endif
+
+struct rt_sigframe {
+ /*
+	 * pad[3] keeps this layout compatible with the struct of the same
+	 * name defined in gcc/libgcc/config/csky/linux-unwind.h
+ */
+ int pad[3];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static long restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
+{
+ int err = 0;
+ unsigned long sr = regs->sr;
+
+ /* sc_pt_regs is structured the same as the start of pt_regs */
+ err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
+
+ /* BIT(0) of regs->sr is Condition Code/Carry bit */
+ regs->sr = (sr & ~1) | (regs->sr & 1);
+
+ /* Restore the floating-point state. */
+ err |= restore_fpu_state(sc);
+
+ return err;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct rt_sigframe __user *)regs->usp;
+
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->a0;
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static int setup_sigcontext(struct rt_sigframe __user *frame,
+ struct pt_regs *regs)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ int err = 0;
+
+ err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs));
+ err |= save_fpu_state(sc);
+
+ return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, size_t framesize)
+{
+ unsigned long sp;
+ /* Default to using normal stack */
+ sp = regs->usp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+	if (on_sig_stack(sp) && unlikely(!on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
+
+ /* Align the stack frame. */
+ sp &= -8UL;
+
+ return (void __user *)sp;
+}
+
+static int
+setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof(*frame)))
+ return -EFAULT;
+
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->usp);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ /* Set up to return from userspace. */
+ regs->lr = (unsigned long)VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+
+ /*
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->usp = (unsigned long)frame;
+ regs->a0 = ksig->sig; /* a0: signal number */
+ regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */
+ regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ rseq_signal_deliver(ksig, regs);
+
+ /* Are we from a system call? */
+ if (in_syscall(regs)) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ forget_syscall(regs);
+
+ /* If so, check system call restarting.. */
+ switch (regs->a0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->a0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->a0 = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->pc -= TRAP0_SIZE;
+ break;
+ }
+ }
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if (in_syscall(regs)) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ forget_syscall(regs);
+
+ /* Restart the system call - no handlers present */
+ switch (regs->a0) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->pc -= TRAP0_SIZE;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->a0 = regs->orig_a0;
+ regs_syscallid(regs) = __NR_restart_syscall;
+ regs->pc -= TRAP0_SIZE;
+ break;
+ }
+ }
+
+ /*
+ * If there is no signal to deliver, we just put the saved
+ * sigmask back.
+ */
+ restore_saved_sigmask();
+}
+
+/*
+ * Notification of userspace execution resumption,
+ * triggered by the _TIF_WORK_MASK flags.
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags)
+{
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ /* Handle pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+}
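
setup_rt_frame() above always passes the signal number in a0, &frame->info
in a1 and &frame->uc in a2, and points regs->lr at the vDSO rt_sigreturn
stub, which is exactly the three-argument contract SA_SIGINFO promises. A
minimal userspace sketch of the other side of that frame:

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    /* Receives (sig, &frame->info, &frame->uc) as built by setup_rt_frame();
     * returning from the handler falls into the rt_sigreturn stub that the
     * kernel placed in the link register. */
    static void handler(int sig, siginfo_t *info, void *ucontext)
    {
            (void)sig; (void)info; (void)ucontext;
            write(STDOUT_FILENO, "caught\n", 7);    /* async-signal-safe */
    }

    int main(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = handler;
            sa.sa_flags = SA_SIGINFO | SA_RESTART;  /* SA_RESTART: the -ERESTARTSYS case above */
            sigaction(SIGUSR1, &sa, NULL);

            kill(getpid(), SIGUSR1);
            return 0;
    }
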
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
new file mode 100644
index 000000000..4b605aa2e
--- /dev/null
+++ b/arch/csky/kernel/smp.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/seq_file.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/sections.h>
+#include <asm/mmu_context.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+enum ipi_message_type {
+ IPI_EMPTY,
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_IRQ_WORK,
+ IPI_MAX
+};
+
+struct ipi_data_struct {
+ unsigned long bits ____cacheline_aligned;
+ unsigned long stats[IPI_MAX] ____cacheline_aligned;
+};
+static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
+
+static irqreturn_t handle_ipi(int irq, void *dev)
+{
+ unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;
+
+ while (true) {
+ unsigned long ops;
+
+ ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
+ if (ops == 0)
+ return IRQ_HANDLED;
+
+ if (ops & (1 << IPI_RESCHEDULE)) {
+ stats[IPI_RESCHEDULE]++;
+ scheduler_ipi();
+ }
+
+ if (ops & (1 << IPI_CALL_FUNC)) {
+ stats[IPI_CALL_FUNC]++;
+ generic_smp_call_function_interrupt();
+ }
+
+ if (ops & (1 << IPI_IRQ_WORK)) {
+ stats[IPI_IRQ_WORK]++;
+ irq_work_run();
+ }
+
+ BUG_ON((ops >> IPI_MAX) != 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void (*send_arch_ipi)(const struct cpumask *mask);
+
+static int ipi_irq;
+void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
+{
+ if (send_arch_ipi)
+ return;
+
+ send_arch_ipi = func;
+ ipi_irq = irq;
+}
+
+static void
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
+{
+ int i;
+
+ for_each_cpu(i, to_whom)
+ set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);
+
+ smp_mb();
+ send_arch_ipi(to_whom);
+}
+
+static const char * const ipi_names[] = {
+ [IPI_EMPTY] = "Empty interrupts",
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_IRQ_WORK] = "Irq work interrupts",
+};
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < IPI_MAX; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10lu ",
+ per_cpu_ptr(&ipi_data, cpu)->stats[i]);
+ seq_printf(p, " %s\n", ipi_names[i]);
+ }
+
+ return 0;
+}
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+static void ipi_stop(void *unused)
+{
+ while (1);
+}
+
+void smp_send_stop(void)
+{
+ on_each_cpu(ipi_stop, NULL, 1);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+static int ipi_dummy_dev;
+
+void __init setup_smp_ipi(void)
+{
+ int rc;
+
+ if (ipi_irq == 0)
+ return;
+
+ rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
+ &ipi_dummy_dev);
+ if (rc)
+ panic("%s IRQ request failed\n", __func__);
+
+ enable_percpu_irq(ipi_irq, 0);
+}
+
+void __init setup_smp(void)
+{
+ struct device_node *node = NULL;
+ unsigned int cpu;
+
+ for_each_of_cpu_node(node) {
+ if (!of_device_is_available(node))
+ continue;
+
+ cpu = of_get_cpu_hwid(node, 0);
+ if (cpu >= NR_CPUS)
+ continue;
+
+ set_cpu_possible(cpu, true);
+ set_cpu_present(cpu, true);
+ }
+}
+
+extern void _start_smp_secondary(void);
+
+volatile unsigned int secondary_hint;
+volatile unsigned int secondary_hint2;
+volatile unsigned int secondary_ccr;
+volatile unsigned int secondary_stack;
+volatile unsigned int secondary_msa1;
+volatile unsigned int secondary_pgd;
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ unsigned long mask = 1 << cpu;
+
+ secondary_stack =
+ (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
+ secondary_hint = mfcr("cr31");
+ secondary_hint2 = mfcr("cr<21, 1>");
+ secondary_ccr = mfcr("cr18");
+ secondary_msa1 = read_mmu_msa1();
+ secondary_pgd = mfcr("cr<29, 15>");
+
+ /*
+	 * Because the other CPUs are still held in reset, we must flush
+	 * this data out of the cache to memory so that the secondary
+	 * CPUs can read it in csky_start_secondary().
+ */
+ mtcr("cr17", 0x22);
+
+ if (mask & mfcr("cr<29, 0>")) {
+ send_arch_ipi(cpumask_of(cpu));
+ } else {
+ /* Enable cpu in SMP reset ctrl reg */
+ mask |= mfcr("cr<29, 0>");
+ mtcr("cr<29, 0>", mask);
+ }
+
+	/* Wait for the CPU to come online */
+ while (!cpu_online(cpu));
+
+ secondary_stack = 0;
+
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+void csky_start_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ mtcr("cr31", secondary_hint);
+ mtcr("cr<21, 1>", secondary_hint2);
+ mtcr("cr18", secondary_ccr);
+
+ mtcr("vbr", vec_base);
+
+ flush_tlb_all();
+ write_mmu_pagemask(0);
+
+#ifdef CONFIG_CPU_HAS_FPU
+ init_fpu();
+#endif
+
+ enable_percpu_irq(ipi_irq, 0);
+
+ mmget(mm);
+ mmgrab(mm);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("CPU%u Online: %s...\n", cpu, __func__);
+
+ local_irq_enable();
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ set_cpu_online(cpu, false);
+
+ irq_migrate_all_off_this_cpu();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_crit("CPU%u: shutdown failed\n", cpu);
+ return;
+ }
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ idle_task_exit();
+
+ cpu_report_death();
+
+ while (!secondary_stack)
+ arch_cpu_idle();
+
+ local_irq_disable();
+
+ asm volatile(
+ "mov sp, %0\n"
+ "mov r8, %0\n"
+ "jmpi csky_start_secondary"
+ :
+ : "r" (secondary_stack));
+}
+#endif
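
The IPI path above is a lock-free mailbox: senders set_bit() their request
into ipi_data->bits and raise the interrupt, while handle_ipi() drains the
whole word with xchg(). A request raised mid-drain is either observed by
the next xchg() iteration or re-delivered by the still-pending hardware
interrupt, so nothing is lost. A freestanding sketch of the same pattern
using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { MSG_RESCHEDULE, MSG_CALL_FUNC, MSG_IRQ_WORK };

    static _Atomic unsigned long mailbox;   /* plays the role of ipi_data->bits */

    static void send_msg(int msg)
    {
            atomic_fetch_or(&mailbox, 1UL << msg); /* like set_bit() */
            /* ...then raise the hardware IPI (send_arch_ipi) */
    }

    static void receive_all(void)
    {
            unsigned long ops;

            /* like xchg(&ipi_data->bits, 0): atomically claim every request */
            while ((ops = atomic_exchange(&mailbox, 0)) != 0) {
                    if (ops & (1UL << MSG_RESCHEDULE))
                            puts("reschedule");
                    if (ops & (1UL << MSG_CALL_FUNC))
                            puts("call function");
                    if (ops & (1UL << MSG_IRQ_WORK))
                            puts("irq work");
            }
    }

    int main(void)
    {
            send_msg(MSG_CALL_FUNC);
            send_msg(MSG_IRQ_WORK);
            receive_all();
            return 0;
    }
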
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
new file mode 100644
index 000000000..9f78f5d21
--- /dev/null
+++ b/arch/csky/kernel/stacktrace.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+#include <linux/ptrace.h>
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long fp, sp, pc;
+
+ if (regs) {
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_sp __asm__ ("sp");
+ const register unsigned long current_fp __asm__ ("r8");
+ fp = current_fp;
+ sp = current_sp;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ fp = thread_saved_fp(task);
+ sp = thread_saved_sp(task);
+ pc = thread_saved_lr(task);
+ }
+
+ for (;;) {
+ unsigned long low, high;
+ struct stackframe *frame;
+
+ if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ break;
+
+ /* Validate frame pointer */
+ low = sp;
+ high = ALIGN(sp, THREAD_SIZE);
+ if (unlikely(fp < low || fp > high || fp & 0x3))
+ break;
+ /* Unwind stack frame */
+ frame = (struct stackframe *)fp;
+ sp = fp;
+ fp = frame->fp;
+ pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ (unsigned long *)(fp - 8));
+ }
+}
+
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long sp, pc;
+ unsigned long *ksp;
+
+ if (regs) {
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_sp __asm__ ("sp");
+ sp = current_sp;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ sp = thread_saved_sp(task);
+ pc = thread_saved_lr(task);
+ }
+
+ if (unlikely(sp & 0x3))
+ return;
+
+ ksp = (unsigned long *)sp;
+ while (!kstack_end(ksp)) {
+ if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ break;
+ pc = (*ksp++) - 0x4;
+ }
+}
+#endif /* CONFIG_FRAME_POINTER */
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+ print_ip_sym((const char *)arg, pc);
+ return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ pr_cont("Call Trace:\n");
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+ if (!in_sched_functions(pc)) {
+ unsigned long *p = arg;
+ *p = pc;
+ return true;
+ }
+ return false;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+ struct stack_trace *trace = arg;
+
+ if (unlikely(nosched && in_sched_functions(pc)))
+ return false;
+ if (unlikely(trace->skip > 0)) {
+ trace->skip--;
+ return false;
+ }
+
+ trace->entries[trace->nr_entries++] = pc;
+ return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+ return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ walk_stackframe(tsk, NULL, save_trace, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
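
The CONFIG_FRAME_POINTER walker above depends on each frame saving
{caller fp, return address} at the address in r8, validated against the
current stack bounds before every step. A simplified host-side sketch of
that loop over a synthetic frame chain (it substitutes a monotonicity
check for the kernel's per-step THREAD_SIZE bounds):

    #include <stdio.h>

    struct stackframe {             /* same shape as the kernel struct above */
            unsigned long fp;       /* caller's frame pointer */
            unsigned long ra;       /* return address */
    };

    static void walk(unsigned long fp, unsigned long low, unsigned long high)
    {
            while (fp >= low && fp < high && !(fp & 0x3)) {
                    struct stackframe *frame = (struct stackframe *)fp;

                    printf("pc=0x%lx\n", frame->ra);
                    if (frame->fp <= fp)    /* must move toward older frames */
                            break;
                    fp = frame->fp;
            }
    }

    int main(void)
    {
            struct stackframe frames[3] = {         /* synthetic chain */
                    { (unsigned long)&frames[1], 0x1111 },
                    { (unsigned long)&frames[2], 0x2222 },
                    { 0,                         0x3333 },
            };

            walk((unsigned long)&frames[0],
                 (unsigned long)&frames[0], (unsigned long)(frames + 3));
            return 0;
    }
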
diff --git a/arch/csky/kernel/syscall.c b/arch/csky/kernel/syscall.c
new file mode 100644
index 000000000..3d30e58a4
--- /dev/null
+++ b/arch/csky/kernel/syscall.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/syscalls.h>
+
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
+{
+ struct thread_info *ti = task_thread_info(current);
+ struct pt_regs *reg = current_pt_regs();
+
+ reg->tls = addr;
+ ti->tp_value = addr;
+
+ return 0;
+}
+
+SYSCALL_DEFINE6(mmap2,
+ unsigned long, addr,
+ unsigned long, len,
+ unsigned long, prot,
+ unsigned long, flags,
+ unsigned long, fd,
+ off_t, offset)
+{
+ if (unlikely(offset & (~PAGE_MASK >> 12)))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> (PAGE_SHIFT - 12));
+}
+
+/*
+ * For abiv1, 64-bit arguments must be passed in even/odd register pairs,
+ * so the advice argument is moved forward, ahead of the 64-bit offset
+ * and len.
+ */
+SYSCALL_DEFINE4(csky_fadvise64_64,
+ int, fd,
+ int, advice,
+ loff_t, offset,
+ loff_t, len)
+{
+ return ksys_fadvise64_64(fd, offset, len, advice);
+}
diff --git a/arch/csky/kernel/syscall_table.c b/arch/csky/kernel/syscall_table.c
new file mode 100644
index 000000000..a0c238c53
--- /dev/null
+++ b/arch/csky/kernel/syscall_table.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/syscalls.h>
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call)[nr] = (call),
+
+#define sys_fadvise64_64 sys_csky_fadvise64_64
+void * const sys_call_table[__NR_syscalls] __page_aligned_data = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
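
The table above is filled in two passes of designated initializers: the
GNU range initializer sets every slot to sys_ni_syscall, then
<asm/unistd.h> re-expands __SYSCALL(nr, call) to override the implemented
entries. A freestanding sketch of the same idiom (range initializers are a
GCC/Clang extension):

    #include <stdio.h>

    static int sys_ni(void)    { return -38; }      /* -ENOSYS */
    static int sys_hello(void) { return 0; }

    #define __NR_hello      1
    #define NR_MAX          4

    #define __SYSCALL(nr, call) [nr] = (call),

    /* default every slot, then let __SYSCALL() override the real entries */
    static int (* const table[NR_MAX])(void) = {
            [0 ... NR_MAX - 1] = sys_ni,
            __SYSCALL(__NR_hello, sys_hello)
    };

    int main(void)
    {
            printf("%d %d\n", table[0](), table[__NR_hello]());
            return 0;
    }
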
diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c
new file mode 100644
index 000000000..52379d866
--- /dev/null
+++ b/arch/csky/kernel/time.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/clocksource.h>
+#include <linux/of_clk.h>
+
+void __init time_init(void)
+{
+ of_clk_init(NULL);
+ timer_probe();
+}
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
new file mode 100644
index 000000000..6e426fba0
--- /dev/null
+++ b/arch/csky/kernel/traps.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <linux/rtc.h>
+#include <linux/uaccess.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/sched/debug.h>
+
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <asm/siginfo.h>
+
+#include <asm/mmu_context.h>
+
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+int show_unhandled_signals = 1;
+
+/* Defined in entry.S */
+asmlinkage void csky_trap(void);
+
+asmlinkage void csky_systemcall(void);
+asmlinkage void csky_cmpxchg(void);
+asmlinkage void csky_get_tls(void);
+asmlinkage void csky_irq(void);
+
+asmlinkage void csky_pagefault(void);
+
+/* Defined in head.S */
+asmlinkage void _start_smp_secondary(void);
+
+void __init pre_trap_init(void)
+{
+ int i;
+
+ mtcr("vbr", vec_base);
+
+ for (i = 1; i < 128; i++)
+ VEC_INIT(i, csky_trap);
+}
+
+void __init trap_init(void)
+{
+ VEC_INIT(VEC_AUTOVEC, csky_irq);
+
+	/* set up trap0, trap2 and trap3 */
+ VEC_INIT(VEC_TRAP0, csky_systemcall);
+ VEC_INIT(VEC_TRAP2, csky_cmpxchg);
+ VEC_INIT(VEC_TRAP3, csky_get_tls);
+
+	/* set up the MMU TLB exception vectors */
+ VEC_INIT(VEC_TLBINVALIDL, csky_pagefault);
+ VEC_INIT(VEC_TLBINVALIDS, csky_pagefault);
+ VEC_INIT(VEC_TLBMODIFIED, csky_pagefault);
+
+#ifdef CONFIG_CPU_HAS_FPU
+ init_fpu();
+#endif
+
+#ifdef CONFIG_SMP
+ mtcr("cr<28, 0>", virt_to_phys(vec_base));
+
+ VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary));
+#endif
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+ int ret;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+
+ pr_emerg("%s [#%d]\n", str, ++die_counter);
+ print_modules();
+ show_regs(regs);
+ show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO);
+
+ ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ make_task_dead(SIGSEGV);
+}
+
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
+{
+ struct task_struct *tsk = current;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, signo)
+ && printk_ratelimit()) {
+ pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx",
+ tsk->comm, task_pid_nr(tsk), signo, code, addr);
+ print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
+ pr_cont("\n");
+ show_regs(regs);
+ }
+
+ force_sig_fault(signo, code, (void __user *)addr);
+}
+
+static void do_trap_error(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, const char *str)
+{
+ current->thread.trap_no = trap_no(regs);
+
+ if (user_mode(regs)) {
+ do_trap(regs, signo, code, addr);
+ } else {
+ if (!fixup_exception(regs))
+ die(regs, str);
+ }
+}
+
+#define DO_ERROR_INFO(name, signo, code, str) \
+asmlinkage __visible void name(struct pt_regs *regs) \
+{ \
+ do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \
+}
+
+DO_ERROR_INFO(do_trap_unknown,
+ SIGILL, ILL_ILLTRP, "unknown exception");
+DO_ERROR_INFO(do_trap_zdiv,
+ SIGFPE, FPE_INTDIV, "error zero div exception");
+DO_ERROR_INFO(do_trap_buserr,
+ SIGSEGV, ILL_ILLADR, "error bus error exception");
+
+asmlinkage void do_trap_misaligned(struct pt_regs *regs)
+{
+#ifdef CONFIG_CPU_NEED_SOFTALIGN
+ csky_alignment(regs);
+#else
+ current->thread.trap_no = trap_no(regs);
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc,
+ "Oops - load/store address misaligned");
+#endif
+}
+
+asmlinkage void do_trap_bkpt(struct pt_regs *regs)
+{
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_single_step_handler(regs))
+ return;
+#endif
+ if (user_mode(regs)) {
+ send_sig(SIGTRAP, current, 0);
+ return;
+ }
+
+ do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc,
+ "Oops - illegal trap exception");
+}
+
+asmlinkage void do_trap_illinsn(struct pt_regs *regs)
+{
+ current->thread.trap_no = trap_no(regs);
+
+#ifdef CONFIG_KPROBES
+ if (kprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifndef CONFIG_CPU_NO_USER_BKPT
+ if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) {
+ send_sig(SIGTRAP, current, 0);
+ return;
+ }
+#endif
+
+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
+ "Oops - illegal instruction exception");
+}
+
+asmlinkage void do_trap_fpe(struct pt_regs *regs)
+{
+#ifdef CONFIG_CPU_HAS_FPU
+ return fpu_fpe(regs);
+#else
+ do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
+ "Oops - fpu instruction exception");
+#endif
+}
+
+asmlinkage void do_trap_priv(struct pt_regs *regs)
+{
+#ifdef CONFIG_CPU_HAS_FPU
+ if (user_mode(regs) && fpu_libc_helper(regs))
+ return;
+#endif
+ do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc,
+ "Oops - illegal privileged exception");
+}
+
+asmlinkage void trap_c(struct pt_regs *regs)
+{
+ switch (trap_no(regs)) {
+ case VEC_ZERODIV:
+ do_trap_zdiv(regs);
+ break;
+ case VEC_TRACE:
+ do_trap_bkpt(regs);
+ break;
+ case VEC_ILLEGAL:
+ do_trap_illinsn(regs);
+ break;
+ case VEC_TRAP1:
+ case VEC_BREAKPOINT:
+ do_trap_bkpt(regs);
+ break;
+ case VEC_ACCESS:
+ do_trap_buserr(regs);
+ break;
+ case VEC_ALIGN:
+ do_trap_misaligned(regs);
+ break;
+ case VEC_FPE:
+ do_trap_fpe(regs);
+ break;
+ case VEC_PRIV:
+ do_trap_priv(regs);
+ break;
+ default:
+ do_trap_unknown(regs);
+ break;
+ }
+}
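
DO_ERROR_INFO() above stamps out one trivial handler per exception class;
after preprocessing (including the concatenation of "Oops - " with the
message string), the first instantiation is equivalent to:

    asmlinkage __visible void do_trap_unknown(struct pt_regs *regs)
    {
            do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc,
                          "Oops - unknown exception");
    }
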
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
new file mode 100644
index 000000000..16c20d64d
--- /dev/null
+++ b/arch/csky/kernel/vdso.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
+#include <asm/vdso.h>
+#endif
+
+extern char vdso_start[], vdso_end[];
+
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
+
+/*
+ * The vDSO data page.
+ */
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static int __init vdso_init(void)
+{
+ unsigned int i;
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+ kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vdso_pages; i++) {
+ struct page *pg;
+
+ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+ vdso_pagelist[i] = pg;
+ }
+ vdso_pagelist[i] = virt_to_page(vdso_data);
+
+ return 0;
+}
+arch_initcall(vdso_init);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_len;
+ int ret;
+
+ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+
+ mmap_write_lock(mm);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto end;
+ }
+
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base;
+
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+ if (unlikely(ret)) {
+ mm->context.vdso = NULL;
+ goto end;
+ }
+
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
+end:
+ mmap_write_unlock(mm);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
+ return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
+ return NULL;
+}
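
From userspace the mapping installed above appears as [vdso] in
/proc/self/maps, and the kernel also hands the base address to every new
process through the auxiliary vector. A small check:

    #include <elf.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* base of the [vdso] mapping from arch_setup_additional_pages() */
            unsigned long base = getauxval(AT_SYSINFO_EHDR);
            const unsigned char *e = (const unsigned char *)base;

            if (base && e[EI_MAG0] == ELFMAG0 && e[EI_MAG1] == ELFMAG1)
                    printf("vdso ELF header at 0x%lx\n", base);
            return 0;
    }
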
diff --git a/arch/csky/kernel/vdso/.gitignore b/arch/csky/kernel/vdso/.gitignore
new file mode 100644
index 000000000..3a19def86
--- /dev/null
+++ b/arch/csky/kernel/vdso/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
+*.tmp
+vdso-syms.S
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
new file mode 100644
index 000000000..0b6909f10
--- /dev/null
+++ b/arch/csky/kernel/vdso/Makefile
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_CKCORE_ADDR32|R_CKCORE_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
+
+# Symbols present in the vdso
+vdso-syms += rt_sigreturn
+vdso-syms += vgettimeofday
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+ccflags-y := -fno-stack-protector -DBUILD_VDSO32
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both
+
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+ $(call if_changed,so2s)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+ $(CROSS_COMPILE)objcopy \
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/csky/kernel/vdso/note.S b/arch/csky/kernel/vdso/note.S
new file mode 100644
index 000000000..2a956c942
--- /dev/null
+++ b/arch/csky/kernel/vdso/note.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/csky/kernel/vdso/rt_sigreturn.S b/arch/csky/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000..0a6bd1216
--- /dev/null
+++ b/arch/csky/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <abi/vdso.h>
+
+ .text
+ENTRY(__vdso_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ SET_SYSCALL_ID
+ trap 0
+ .cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/csky/kernel/vdso/so2s.sh b/arch/csky/kernel/vdso/so2s.sh
new file mode 100755
index 000000000..69da3d529
--- /dev/null
+++ b/arch/csky/kernel/vdso/so2s.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_5.10\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
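
Fed a line of "$(NM) -D" output such as the following (address purely
illustrative):

    000005a0 T __vdso_rt_sigreturn@@LINUX_5.10

the sed expression emits:

    .global __vdso_rt_sigreturn
    .set __vdso_rt_sigreturn,0x000005a0

and the final grep keeps only lines starting with a dot, dropping any nm
output that did not match the pattern.
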
diff --git a/arch/csky/kernel/vdso/vdso.S b/arch/csky/kernel/vdso/vdso.S
new file mode 100644
index 000000000..5162ca069
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/csky/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000..590a6c79f
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.lds.S
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/page.h>
+
+OUTPUT_ARCH(csky)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+ . = 0x800;
+ .text : { *(.text .text.*) } :text
+
+ .data : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+}
+
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_5.10 {
+ global:
+ __vdso_rt_sigreturn;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ local: *;
+ };
+}
diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000..da491832c
--- /dev/null
+++ b/arch/csky/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
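
Userspace never calls these entry points by name; libc resolves them from
the vDSO and falls back to the real syscall when they are absent, so the
fast path is exercised by a plain clock_gettime() call:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* glibc dispatches to __vdso_clock_gettime()/__vdso_clock_gettime64()
             * when present, avoiding the trap into the kernel */
            if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                    printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }
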
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..68c980d08
--- /dev/null
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/memory.h>
+
+OUTPUT_ARCH(csky)
+ENTRY(_start)
+
+#ifndef __cskyBE__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+#define VBR_BASE \
+ . = ALIGN(1024); \
+ vec_base = .; \
+ . += 512;
+
+SECTIONS
+{
+ . = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
+
+ _start = .;
+ HEAD_TEXT_SECTION
+ . = ALIGN(PAGE_SIZE);
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ _text = .;
+ _stext = .;
+ VBR_BASE
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0
+ _etext = .;
+
+ /* __init_begin __init_end must be page aligned for free_initmem */
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+#ifdef CONFIG_HAVE_TCM
+ .tcm_start : {
+ . = ALIGN(PAGE_SIZE);
+ __tcm_start = .;
+ }
+
+ .text_data_tcm FIXADDR_TCM : AT(__tcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_text_data = .;
+ *(.tcm.text)
+ *(.tcm.rodata)
+#ifndef CONFIG_HAVE_DTCM
+ *(.tcm.data)
+#endif
+ . = ALIGN(4);
+ __etcm_text_data = .;
+ }
+
+ . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
+
+#ifdef CONFIG_HAVE_DTCM
+ #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
+
+ .dtcm_start : {
+ __dtcm_start = .;
+ }
+
+ .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_data = .;
+ *(.tcm.data)
+ . = ALIGN(4);
+ __etcm_data = .;
+ }
+
+ . = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
+
+ .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
+#else
+ .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
+#endif
+ . = ALIGN(PAGE_SIZE);
+ __tcm_end = .;
+ }
+#endif
+
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
+ _end = . ;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ DISCARDS
+}
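
The jiffies alias near the top of vmlinux.lds.S above depends on
endianness: jiffies is an unsigned long that must overlay the low 32 bits
of the 64-bit jiffies_64 counter, which sit at byte offset 0 on
little-endian and offset 4 on big-endian, hence the "+ 4" under __cskyBE__.
A quick demonstration of the offset:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint64_t jiffies_64 = 0x1122334455667788ULL;
            uint32_t low;
            int big_endian = (*(unsigned char *)&jiffies_64 == 0x11);

            /* low word at offset 0 (LE) or offset 4 (BE) */
            memcpy(&low, (unsigned char *)&jiffies_64 + (big_endian ? 4 : 0),
                   sizeof(low));
            printf("low 32 bits: 0x%08x\n", (unsigned)low); /* 0x55667788 */
            return 0;
    }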