author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/csky/include
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/csky/include')
-rw-r--r--  arch/csky/include/asm/Kbuild | 12
-rw-r--r--  arch/csky/include/asm/addrspace.h | 9
-rw-r--r--  arch/csky/include/asm/asid.h | 78
-rw-r--r--  arch/csky/include/asm/atomic.h | 202
-rw-r--r--  arch/csky/include/asm/barrier.h | 88
-rw-r--r--  arch/csky/include/asm/bitops.h | 79
-rw-r--r--  arch/csky/include/asm/bug.h | 28
-rw-r--r--  arch/csky/include/asm/cache.h | 32
-rw-r--r--  arch/csky/include/asm/cacheflush.h | 9
-rw-r--r--  arch/csky/include/asm/checksum.h | 49
-rw-r--r--  arch/csky/include/asm/clocksource.h | 8
-rw-r--r--  arch/csky/include/asm/cmpxchg.h | 155
-rw-r--r--  arch/csky/include/asm/elf.h | 90
-rw-r--r--  arch/csky/include/asm/fixmap.h | 33
-rw-r--r--  arch/csky/include/asm/ftrace.h | 30
-rw-r--r--  arch/csky/include/asm/futex.h | 121
-rw-r--r--  arch/csky/include/asm/highmem.h | 44
-rw-r--r--  arch/csky/include/asm/io.h | 54
-rw-r--r--  arch/csky/include/asm/irq_work.h | 11
-rw-r--r--  arch/csky/include/asm/irqflags.h | 49
-rw-r--r--  arch/csky/include/asm/jump_label.h | 52
-rw-r--r--  arch/csky/include/asm/kprobes.h | 48
-rw-r--r--  arch/csky/include/asm/memory.h | 25
-rw-r--r--  arch/csky/include/asm/mmu.h | 12
-rw-r--r--  arch/csky/include/asm/mmu_context.h | 39
-rw-r--r--  arch/csky/include/asm/page.h | 100
-rw-r--r--  arch/csky/include/asm/pci.h | 15
-rw-r--r--  arch/csky/include/asm/perf_event.h | 14
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 74
-rw-r--r--  arch/csky/include/asm/pgtable.h | 274
-rw-r--r--  arch/csky/include/asm/probes.h | 24
-rw-r--r--  arch/csky/include/asm/processor.h | 87
-rw-r--r--  arch/csky/include/asm/ptrace.h | 102
-rw-r--r--  arch/csky/include/asm/reg_ops.h | 26
-rw-r--r--  arch/csky/include/asm/seccomp.h | 11
-rw-r--r--  arch/csky/include/asm/sections.h | 12
-rw-r--r--  arch/csky/include/asm/shmparam.h | 10
-rw-r--r--  arch/csky/include/asm/smp.h | 30
-rw-r--r--  arch/csky/include/asm/spinlock.h | 12
-rw-r--r--  arch/csky/include/asm/spinlock_types.h | 9
-rw-r--r--  arch/csky/include/asm/stackprotector.h | 21
-rw-r--r--  arch/csky/include/asm/string.h | 12
-rw-r--r--  arch/csky/include/asm/switch_to.h | 35
-rw-r--r--  arch/csky/include/asm/syscall.h | 68
-rw-r--r--  arch/csky/include/asm/syscalls.h | 14
-rw-r--r--  arch/csky/include/asm/tcm.h | 24
-rw-r--r--  arch/csky/include/asm/thread_info.h | 89
-rw-r--r--  arch/csky/include/asm/tlb.h | 9
-rw-r--r--  arch/csky/include/asm/tlbflush.h | 24
-rw-r--r--  arch/csky/include/asm/traps.h | 60
-rw-r--r--  arch/csky/include/asm/uaccess.h | 203
-rw-r--r--  arch/csky/include/asm/unistd.h | 5
-rw-r--r--  arch/csky/include/asm/uprobes.h | 33
-rw-r--r--  arch/csky/include/asm/vdso.h | 27
-rw-r--r--  arch/csky/include/asm/vdso/clocksource.h | 9
-rw-r--r--  arch/csky/include/asm/vdso/gettimeofday.h | 114
-rw-r--r--  arch/csky/include/asm/vdso/processor.h | 12
-rw-r--r--  arch/csky/include/asm/vdso/vsyscall.h | 22
-rw-r--r--  arch/csky/include/asm/vmalloc.h | 4
-rw-r--r--  arch/csky/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/csky/include/uapi/asm/byteorder.h | 8
-rw-r--r--  arch/csky/include/uapi/asm/cachectl.h | 13
-rw-r--r--  arch/csky/include/uapi/asm/perf_regs.h | 50
-rw-r--r--  arch/csky/include/uapi/asm/ptrace.h | 51
-rw-r--r--  arch/csky/include/uapi/asm/sigcontext.h | 13
-rw-r--r--  arch/csky/include/uapi/asm/unistd.h | 14
66 files changed, 3093 insertions, 0 deletions
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
new file mode 100644
index 0000000000..1117c28cb7
--- /dev/null
+++ b/arch/csky/include/asm/Kbuild
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += asm-offsets.h
+generic-y += extable.h
+generic-y += gpio.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += qspinlock.h
+generic-y += parport.h
+generic-y += user.h
+generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h
new file mode 100644
index 0000000000..6fc05d4453
--- /dev/null
+++ b/arch/csky/include/asm/addrspace.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ADDRSPACE_H
+#define __ASM_CSKY_ADDRSPACE_H
+
+#define KSEG0 0x80000000ul
+#define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0)
+
+#endif /* __ASM_CSKY_ADDRSPACE_H */
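
KSEG0ADDR() above is pure bit arithmetic: it keeps the low 29 bits of the address and forces the result into the 0x80000000 kernel segment. A minimal user-space sketch of that arithmetic (the physical address is made up for illustration):

    #include <stdio.h>

    #define KSEG0           0x80000000ul
    #define KSEG0ADDR(a)    (((unsigned long)a & 0x1fffffff) | KSEG0)

    int main(void)
    {
            /* hypothetical physical address of a DRAM buffer */
            unsigned long phys = 0x20001234ul;

            /* 0x20001234 & 0x1fffffff = 0x00001234, placed in KSEG0 */
            printf("KSEG0ADDR(%#lx) = %#lx\n", phys, KSEG0ADDR(phys));
            return 0;
    }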
diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h
new file mode 100644
index 0000000000..6ff205a97a
--- /dev/null
+++ b/arch/csky/include/asm/asid.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_ASID_H
+#define __ASM_ASM_ASID_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+struct asid_info
+{
+ atomic64_t generation;
+ unsigned long *map;
+ atomic64_t __percpu *active;
+ u64 __percpu *reserved;
+ u32 bits;
+ /* Lock protecting the structure */
+ raw_spinlock_t lock;
+ /* Which CPUs require a context flush on the next call */
+ cpumask_t flush_pending;
+ /* Number of ASIDs allocated per context (shift value) */
+ unsigned int ctxt_shift;
+ /* Callback to locally flush the context. */
+ void (*flush_cpu_ctxt_cb)(void);
+};
+
+#define NUM_ASIDS(info) (1UL << ((info)->bits))
+#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift)
+
+#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
+
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu, struct mm_struct *mm);
+
+/*
+ * Check that the ASID is still valid for the context; if not, generate a new one.
+ *
+ * @pasid: Pointer to the current ASID batch
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+static inline void asid_check_context(struct asid_info *info,
+ atomic64_t *pasid, unsigned int cpu,
+ struct mm_struct *mm)
+{
+ u64 asid, old_active_asid;
+
+ asid = atomic64_read(pasid);
+
+ /*
+ * The memory ordering here is subtle.
+ * If our active_asid is non-zero and the ASID matches the current
+ * generation, then we update the active_asid entry with a relaxed
+ * cmpxchg. Racing with a concurrent rollover means that either:
+ *
+ * - We get a zero back from the cmpxchg and end up waiting on the
+ * lock. Taking the lock synchronises with the rollover and so
+ * we are forced to see the updated generation.
+ *
+ * - We get a valid ASID back from the cmpxchg, which means the
+ * relaxed xchg in flush_context will treat us as reserved
+ * because atomic RmWs are totally ordered for a given location.
+ */
+ old_active_asid = atomic64_read(&active_asid(info, cpu));
+ if (old_active_asid &&
+ !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
+ atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
+ old_active_asid, asid))
+ return;
+
+ asid_new_context(info, pasid, cpu, mm);
+}
+
+int asid_allocator_init(struct asid_info *info,
+ u32 bits, unsigned int asid_per_ctxt,
+ void (*flush_cpu_ctxt_cb)(void));
+
+#endif
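
The fast path in asid_check_context() hinges on the test !((asid ^ generation) >> info->bits): XOR cancels identical bits, and shifting away the low ASID bits leaves zero exactly when the generation fields match. A stand-alone sketch of that arithmetic, with an assumed 8-bit ASID field (csky actually sizes this from CONFIG_CPU_ASID_BITS):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 8     /* assumed width for this demo */

    /* true iff 'asid' was handed out during 'generation' */
    static bool same_generation(uint64_t asid, uint64_t generation)
    {
            return !((asid ^ generation) >> ASID_BITS);
    }

    int main(void)
    {
            uint64_t gen = 3ull << ASID_BITS;       /* generation 3 */

            printf("%d\n", same_generation(gen | 0x42, gen));                 /* 1 */
            printf("%d\n", same_generation((2ull << ASID_BITS) | 0x42, gen)); /* 0 */
            return 0;
    }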
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
new file mode 100644
index 0000000000..4dab44f614
--- /dev/null
+++ b/arch/csky/include/asm/atomic.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ATOMIC_H
+#define __ASM_CSKY_ATOMIC_H
+
+#ifdef CONFIG_SMP
+#include <asm-generic/atomic64.h>
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define __atomic_acquire_fence() __bar_brarw()
+
+#define __atomic_release_fence() __bar_brwaw()
+
+static __always_inline int arch_atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+#define ATOMIC_OP(op) \
+static __always_inline \
+void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %0, (%2) \n" \
+ " " #op " %0, %1 \n" \
+ " stex.w %0, (%2) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp) \
+ : "r" (i), "r" (&v->counter) \
+ : "memory"); \
+}
+
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
+ATOMIC_OP(and)
+ATOMIC_OP( or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
+#define ATOMIC_FETCH_OP(op) \
+static __always_inline \
+int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ register int ret, tmp; \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ " " #op " %0, %2 \n" \
+ " stex.w %0, (%3) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ return ret; \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static __always_inline \
+int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ return arch_atomic_fetch_##op##_relaxed(i, v) c_op i; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_FETCH_OP(op) \
+ ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+
+#define ATOMIC_OPS(op) \
+ ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS( or)
+ATOMIC_OPS(xor)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, tmp;
+
+ __asm__ __volatile__ (
+ RELEASE_FENCE
+ "1: ldex.w %0, (%3) \n"
+ " cmpne %0, %4 \n"
+ " bf 2f \n"
+ " mov %1, %0 \n"
+ " add %1, %2 \n"
+ " stex.w %1, (%3) \n"
+ " bez %1, 1b \n"
+ FULL_FENCE
+ "2:\n"
+ : "=&r" (prev), "=&r" (tmp)
+ : "r" (a), "r" (&v->counter), "r" (u)
+ : "memory");
+
+ return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int rc, tmp;
+
+ __asm__ __volatile__ (
+ RELEASE_FENCE
+ "1: ldex.w %0, (%2) \n"
+ " movi %1, 0 \n"
+ " blz %0, 2f \n"
+ " movi %1, 1 \n"
+ " addi %0, 1 \n"
+ " stex.w %0, (%2) \n"
+ " bez %0, 1b \n"
+ FULL_FENCE
+ "2:\n"
+ : "=&r" (tmp), "=&r" (rc)
+ : "r" (&v->counter)
+ : "memory");
+
+ return rc ? true : false;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int rc, tmp;
+
+ __asm__ __volatile__ (
+ RELEASE_FENCE
+ "1: ldex.w %0, (%2) \n"
+ " movi %1, 0 \n"
+ " bhz %0, 2f \n"
+ " movi %1, 1 \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%2) \n"
+ " bez %0, 1b \n"
+ FULL_FENCE
+ "2:\n"
+ : "=&r" (tmp), "=&r" (rc)
+ : "r" (&v->counter)
+ : "memory");
+
+ return rc ? true : false;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, tmp;
+
+ __asm__ __volatile__ (
+ RELEASE_FENCE
+ "1: ldex.w %0, (%2) \n"
+ " subi %1, %0, 1 \n"
+ " blz %1, 2f \n"
+ " stex.w %1, (%2) \n"
+ " bez %1, 1b \n"
+ FULL_FENCE
+ "2:\n"
+ : "=&r" (dec), "=&r" (tmp)
+ : "r" (&v->counter)
+ : "memory");
+
+ return dec - 1;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+
+#else
+#include <asm-generic/atomic.h>
+#endif
+
+#endif /* __ASM_CSKY_ATOMIC_H */
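
Every ATOMIC_OP body above is a load-exclusive/store-exclusive retry loop: ldex.w loads and sets a reservation, stex.w stores and leaves 1 in its register on success or 0 if the reservation was lost, and "bez %0, 1b" retries until the store wins. The same loop shape in portable C11, as an illustration of the pattern rather than of the csky instructions themselves:

    #include <stdatomic.h>

    /* Sketch of arch_atomic_add(): retry until the read-modify-write
     * completes without another CPU touching the location in between. */
    static void atomic_add_sketch(atomic_int *v, int i)
    {
            int old = atomic_load_explicit(v, memory_order_relaxed);

            /* CAS failure plays the role of "bez %0, 1b"; 'old' is
             * refreshed with the current value on every failure. */
            while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                            memory_order_relaxed, memory_order_relaxed))
                    ;
    }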
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
new file mode 100644
index 0000000000..15de58b10a
--- /dev/null
+++ b/arch/csky/include/asm/barrier.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_BARRIER_H
+#define __ASM_CSKY_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() asm volatile ("nop\n":::"memory")
+
+#ifdef CONFIG_SMP
+
+/*
+ * bar.brwarws: ordering barrier for all load/store instructions
+ * before/after
+ *
+ * |31|30 26|25 21|20 16|15 10|9 5|4 0|
+ * 1 10000 00000 00000 100001 00001 0 bw br aw ar
+ *
+ * b: before
+ * a: after
+ * r: read
+ * w: write
+ *
+ * Here are all combinations:
+ *
+ * bar.brw
+ * bar.br
+ * bar.bw
+ * bar.arw
+ * bar.ar
+ * bar.aw
+ * bar.brwarw
+ * bar.brarw
+ * bar.bwarw
+ * bar.brwar
+ * bar.brwaw
+ * bar.brar
+ * bar.bwaw
+ */
+#define FULL_FENCE ".long 0x842fc000\n"
+#define ACQUIRE_FENCE ".long 0x8427c000\n"
+#define RELEASE_FENCE ".long 0x842ec000\n"
+
+#define __bar_brw() asm volatile (".long 0x842cc000\n":::"memory")
+#define __bar_br() asm volatile (".long 0x8424c000\n":::"memory")
+#define __bar_bw() asm volatile (".long 0x8428c000\n":::"memory")
+#define __bar_arw() asm volatile (".long 0x8423c000\n":::"memory")
+#define __bar_ar() asm volatile (".long 0x8421c000\n":::"memory")
+#define __bar_aw() asm volatile (".long 0x8422c000\n":::"memory")
+#define __bar_brwarw() asm volatile (FULL_FENCE:::"memory")
+#define __bar_brarw() asm volatile (ACQUIRE_FENCE:::"memory")
+#define __bar_bwarw() asm volatile (".long 0x842bc000\n":::"memory")
+#define __bar_brwar() asm volatile (".long 0x842dc000\n":::"memory")
+#define __bar_brwaw() asm volatile (RELEASE_FENCE:::"memory")
+#define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory")
+#define __bar_bwaw() asm volatile (".long 0x842ac000\n":::"memory")
+
+#define __smp_mb() __bar_brwarw()
+#define __smp_rmb() __bar_brar()
+#define __smp_wmb() __bar_bwaw()
+
+#define __smp_acquire_fence() __bar_brarw()
+#define __smp_release_fence() __bar_brwaw()
+
+#endif /* CONFIG_SMP */
+
+/*
+ * sync: completion barrier, all sync.xx instructions
+ * guarantee that the bus response to the last ld/st issued
+ * before the sync has been received
+ * sync.s: inherits from sync, but is also shareable to other cores
+ * sync.i: inherits from sync, but also flushes the cpu pipeline
+ * sync.is: the same as sync.i + sync.s
+ */
+#define mb() asm volatile ("sync\n":::"memory")
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+/*
+ * Using three sync.is to prevent speculative PTW
+ */
+#define sync_is() asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_BARRIER_H */
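
The magic .long constants above all share one encoding: base opcode 0x8420c000 with the bw/br/aw/ar flags in bits 19..16, which is why FULL_FENCE (all four set) is 0x842fc000 and RELEASE_FENCE (bw|br|aw) is 0x842ec000. A small generator that reproduces the table, derived from the constants themselves:

    #include <stdint.h>
    #include <stdio.h>

    /* bar.<flags>: bw/br/aw/ar choose which accesses are ordered */
    static uint32_t bar_opcode(int bw, int br, int aw, int ar)
    {
            return 0x8420c000u | (uint32_t)bw << 19 | (uint32_t)br << 18 |
                   (uint32_t)aw << 17 | (uint32_t)ar << 16;
    }

    int main(void)
    {
            printf("bar.brwarw = 0x%08x\n", bar_opcode(1, 1, 1, 1)); /* 0x842fc000 */
            printf("bar.brarw  = 0x%08x\n", bar_opcode(0, 1, 1, 1)); /* 0x8427c000 */
            printf("bar.brwaw  = 0x%08x\n", bar_opcode(1, 1, 1, 0)); /* 0x842ec000 */
            return 0;
    }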
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
new file mode 100644
index 0000000000..72e1b2aa29
--- /dev/null
+++ b/arch/csky/include/asm/bitops.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_BITOPS_H
+#define __ASM_CSKY_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * asm-generic/bitops/ffs.h
+ */
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ "addi %0, 1\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/__ffs.h
+ */
+static __always_inline unsigned long __ffs(unsigned long x)
+{
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/fls.h
+ */
+static __always_inline int fls(unsigned int x)
+{
+ asm volatile(
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+
+ return (32 - x);
+}
+
+/*
+ * asm-generic/bitops/__fls.h
+ */
+static __always_inline unsigned long __fls(unsigned long x)
+{
+ return fls(x) - 1;
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+
+/*
+ * FIXME: why could only the atomic bitops be used here?
+ */
+#include <asm-generic/bitops/non-atomic.h>
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#endif /* __ASM_CSKY_BITOPS_H */
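
On csky, ff1 counts the leading zeros of a register, so __ffs()/ffs() first bit-reverse the value (brev) to turn that into a trailing-zero count, and fls() becomes 32 minus the leading-zero count. The intended results, mirrored with GCC builtins for reference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 0x00000800;    /* only bit 11 set */

            /* ffs(): 1-based index of the least significant set bit */
            printf("ffs   = %d\n", __builtin_ffs(x));       /* 12 */
            /* __ffs(): 0-based index of the least significant set bit */
            printf("__ffs = %d\n", __builtin_ctz(x));       /* 11 */
            /* fls(): 1-based index of the most significant set bit */
            printf("fls   = %d\n", 32 - __builtin_clz(x));  /* 12 */
            return 0;
    }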
diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h
new file mode 100644
index 0000000000..03f1a5f918
--- /dev/null
+++ b/arch/csky/include/asm/bug.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_BUG_H
+#define __ASM_CSKY_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define BUG() \
+do { \
+ asm volatile ("bkpt\n"); \
+ unreachable(); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
+
+void show_regs(struct pt_regs *regs);
+void show_code(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_BUG_H */
diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h
new file mode 100644
index 0000000000..4b5c09bf1d
--- /dev/null
+++ b/arch/csky/include/asm/cache.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CACHE_H
+#define __ASM_CSKY_CACHE_H
+
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#ifndef __ASSEMBLY__
+
+void dcache_wb_line(unsigned long start);
+
+void icache_inv_range(unsigned long start, unsigned long end);
+void icache_inv_all(void);
+void local_icache_inv_all(void *priv);
+
+void dcache_wb_range(unsigned long start, unsigned long end);
+void dcache_wbinv_all(void);
+
+void cache_wbinv_range(unsigned long start, unsigned long end);
+void cache_wbinv_all(void);
+
+void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
+void dma_wb_range(unsigned long start, unsigned long end);
+
+#endif
+#endif /* __ASM_CSKY_CACHE_H */
diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h
new file mode 100644
index 0000000000..d0f9eafe89
--- /dev/null
+++ b/arch/csky/include/asm/cacheflush.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CACHEFLUSH_H
+#define __ASM_CSKY_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <abi/cacheflush.h>
+
+#endif /* __ASM_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h
new file mode 100644
index 0000000000..aa12ef4b90
--- /dev/null
+++ b/arch/csky/include/asm/checksum.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CHECKSUM_H
+#define __ASM_CSKY_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <asm/byteorder.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 tmp;
+
+ asm volatile(
+ "mov %1, %0\n"
+ "rori %0, 16\n"
+ "addu %0, %1\n"
+ "lsri %0, 16\n"
+ : "=r"(csum), "=r"(tmp)
+ : "0"(csum));
+
+ return (__force __sum16) ~csum;
+}
+#define csum_fold csum_fold
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len, unsigned short proto, __wsum sum)
+{
+ asm volatile(
+ "clrc\n"
+ "addc %0, %1\n"
+ "addc %0, %2\n"
+ "addc %0, %3\n"
+ "inct %0\n"
+ : "=r"(sum)
+ : "r"((__force u32)saddr), "r"((__force u32)daddr),
+#ifdef __BIG_ENDIAN
+ "r"(proto + len),
+#else
+ "r"((proto + len) << 8),
+#endif
+ "0" ((__force unsigned long)sum)
+ : "cc");
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CSKY_CHECKSUM_H */
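
csum_fold() adds the 32-bit sum to a copy of itself rotated by 16 bits, so the upper half of the result accumulates both halves plus the end-around carry; shifting right by 16 and complementing then yields the 16-bit Internet checksum. The same arithmetic in portable C (a sketch, not the kernel's generic implementation):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csum_fold_sketch(uint32_t csum)
    {
            /* rori 16: rotate so each half lines up with the other */
            uint32_t ror16 = (csum >> 16) | (csum << 16);

            /* high half of the sum = low half + high half + carry */
            return (uint16_t)~((csum + ror16) >> 16);
    }

    int main(void)
    {
            printf("%#x\n", csum_fold_sketch(0x0001ffffu)); /* 0xfffe */
            return 0;
    }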
diff --git a/arch/csky/include/asm/clocksource.h b/arch/csky/include/asm/clocksource.h
new file mode 100644
index 0000000000..54da0e49ef
--- /dev/null
+++ b/arch/csky/include/asm/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
+#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
new file mode 100644
index 0000000000..916043b845
--- /dev/null
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CMPXCHG_H
+#define __ASM_CSKY_CMPXCHG_H
+
+#ifdef CONFIG_SMP
+#include <linux/bug.h>
+#include <asm/barrier.h>
+
+#define __xchg_relaxed(new, ptr, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ unsigned long tmp; \
+ switch (size) { \
+ case 2: { \
+ u32 ret; \
+ u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \
+ u32 mask = 0xffff << shif; \
+ __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %0, (%4)\n" \
+ " and %1, %0, %2\n" \
+ " or %1, %1, %3\n" \
+ " stex.w %1, (%4)\n" \
+ " bez %1, 1b\n" \
+ : "=&r" (ret), "=&r" (tmp) \
+ : "r" (~mask), \
+ "r" ((u32)__new << shif), \
+ "r" (__ptr) \
+ : "memory"); \
+ __ret = (__typeof__(*(ptr))) \
+ ((ret & mask) >> shif); \
+ break; \
+ } \
+ case 4: \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ : "=&r" (__ret), "=&r" (tmp) \
+ : "r" (__new), "r"(__ptr) \
+ :); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_relaxed(ptr, x) \
+ (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
+
+#define __cmpxchg_relaxed(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(new) __tmp; \
+ __typeof__(old) __old = (old); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " cmpne %0, %4 \n" \
+ " bt 2f \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (__tmp) \
+ : "r" (__new), "r"(__ptr), "r"(__old) \
+ :); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define __cmpxchg_acquire(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(new) __tmp; \
+ __typeof__(old) __old = (old); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " cmpne %0, %4 \n" \
+ " bt 2f \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ ACQUIRE_FENCE \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (__tmp) \
+ : "r" (__new), "r"(__ptr), "r"(__old) \
+ :); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_acquire(ptr, o, n) \
+ (__cmpxchg_acquire((ptr), (o), (n), sizeof(*(ptr))))
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(new) __tmp; \
+ __typeof__(old) __old = (old); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ asm volatile ( \
+ RELEASE_FENCE \
+ "1: ldex.w %0, (%3) \n" \
+ " cmpne %0, %4 \n" \
+ " bt 2f \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ FULL_FENCE \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (__tmp) \
+ : "r" (__new), "r"(__ptr), "r"(__old) \
+ :); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg(ptr, o, n) \
+ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+
+#define arch_cmpxchg_local(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+#else
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#endif /* __ASM_CSKY_CMPXCHG_H */
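
The size-2 case of __xchg_relaxed() exists because there is no 16-bit ldex/stex: it operates on the aligned 32-bit word containing the halfword, with shif selecting the half (little-endian layout), mask isolating it, and the loop splicing the new value in. The mask/shift arithmetic as a stand-alone sketch, with a C11 CAS standing in for ldex.w/stex.w:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Exchange a u16 living inside an aligned u32, little-endian. */
    static uint16_t xchg16_sketch(uint16_t *p, uint16_t newval)
    {
            uintptr_t addr = (uintptr_t)p;
            unsigned int shif = (addr & 2) ? 16 : 0;  /* which halfword */
            uint32_t mask = 0xffffu << shif;
            _Atomic uint32_t *word = (_Atomic uint32_t *)(addr & ~(uintptr_t)2);
            uint32_t old = atomic_load_explicit(word, memory_order_relaxed);

            while (!atomic_compare_exchange_weak_explicit(word, &old,
                            (old & ~mask) | ((uint32_t)newval << shif),
                            memory_order_relaxed, memory_order_relaxed))
                    ;       /* retry, as "bez %1, 1b" does */
            return (old & mask) >> shif;
    }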
diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h
new file mode 100644
index 0000000000..48b83e283e
--- /dev/null
+++ b/arch/csky/include/asm/elf.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ELF_H
+#define __ASM_CSKY_ELF_H
+
+#include <asm/ptrace.h>
+#include <abi/regdef.h>
+
+#define ELF_ARCH EM_CSKY
+#define EM_CSKY_OLD 39
+
+/* CSKY Relocations */
+#define R_CSKY_NONE 0
+#define R_CSKY_32 1
+#define R_CSKY_PCIMM8BY4 2
+#define R_CSKY_PCIMM11BY2 3
+#define R_CSKY_PCIMM4BY2 4
+#define R_CSKY_PC32 5
+#define R_CSKY_PCRELJSR_IMM11BY2 6
+#define R_CSKY_GNU_VTINHERIT 7
+#define R_CSKY_GNU_VTENTRY 8
+#define R_CSKY_RELATIVE 9
+#define R_CSKY_COPY 10
+#define R_CSKY_GLOB_DAT 11
+#define R_CSKY_JUMP_SLOT 12
+#define R_CSKY_ADDR_HI16 24
+#define R_CSKY_ADDR_LO16 25
+#define R_CSKY_PCRELJSR_IMM26BY2 40
+
+typedef unsigned long elf_greg_t;
+
+typedef struct user_fp elf_fpregset_t;
+
+/*
+ * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() uses a fixed size of
+ * elf_prstatus: 148 bytes for abiv1 and 220 for abiv2. That size is enough
+ * for coredumps, so the full sizeof(struct pt_regs) is not needed.
+ */
+#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2)
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \
+ ((x)->e_machine == EM_CSKY_OLD))
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_EXEC_PAGESIZE 4096
+#define ELF_CLASS ELFCLASS32
+#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; }
+
+#ifdef __cskyBE__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE 0x0UL
+#include <abi/elf.h>
+
+/* Similar, but for a thread other than current. */
+struct task_struct;
+extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs);
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation specific
+ * libraries for optimization. This is more specific in intent than poking
+ * at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* __ASM_CSKY_ELF_H */
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
new file mode 100644
index 0000000000..49a77cbbe2
--- /dev/null
+++ b/arch/csky/include/asm/fixmap.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FIXMAP_H
+#define __ASM_CSKY_FIXMAP_H
+
+#include <asm/page.h>
+#include <asm/memory.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_size.h>
+#endif
+
+enum fixed_addresses {
+#ifdef CONFIG_HAVE_TCM
+ FIX_TCM = TCM_NR_PAGES,
+#endif
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+extern void fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base);
+extern void __init fixaddr_init(void);
+
+#endif /* __ASM_CSKY_FIXMAP_H */
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
new file mode 100644
index 0000000000..9b86341731
--- /dev/null
+++ b/arch/csky/include/asm/ftrace.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FTRACE_H
+#define __ASM_CSKY_FTRACE_H
+
+#define MCOUNT_INSN_SIZE 14
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+
+#ifndef __ASSEMBLY__
+
+extern void _mcount(unsigned long);
+
+extern void ftrace_graph_call(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/futex.h b/arch/csky/include/asm/futex.h
new file mode 100644
index 0000000000..6cfd312723
--- /dev/null
+++ b/arch/csky/include/asm/futex.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FUTEX_H
+#define __ASM_CSKY_FUTEX_H
+
+#ifndef CONFIG_SMP
+#include <asm-generic/futex.h>
+#else
+#include <linux/atomic.h>
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ u32 tmp; \
+ \
+ __atomic_pre_full_fence(); \
+ \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %[ov], %[u] \n" \
+ " "insn" \n" \
+ "2: stex.w %[t], %[u] \n" \
+ " bez %[t], 1b \n" \
+ " br 4f \n" \
+ "3: mov %[r], %[e] \n" \
+ "4: \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .balign 4 \n" \
+ " .long 1b, 3b \n" \
+ " .long 2b, 3b \n" \
+ " .previous \n" \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr), [t] "=&r" (tmp) \
+ : [op] "Jr" (oparg), [e] "jr" (-EFAULT) \
+ : "memory"); \
+ \
+ __atomic_post_full_fence(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %[t], %[ov]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %[t], %[ov], %[op]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val, tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __atomic_pre_full_fence();
+
+ __asm__ __volatile__ (
+ "1: ldex.w %[v], %[u] \n"
+ " cmpne %[v], %[ov] \n"
+ " bt 4f \n"
+ " mov %[t], %[nv] \n"
+ "2: stex.w %[t], %[u] \n"
+ " bez %[t], 1b \n"
+ " br 4f \n"
+ "3: mov %[r], %[e] \n"
+ "4: \n"
+ " .section __ex_table,\"a\" \n"
+ " .balign 4 \n"
+ " .long 1b, 3b \n"
+ " .long 2b, 3b \n"
+ " .previous \n"
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr),
+ [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT)
+ : "memory");
+
+ __atomic_post_full_fence();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASM_CSKY_FUTEX_H */
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
new file mode 100644
index 0000000000..1ed810effb
--- /dev/null
+++ b/arch/csky/include/asm/highmem.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_HIGHMEM_H
+#define __ASM_CSKY_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <asm/kmap_size.h>
+#include <asm/cache.h>
+
+/* undef for production */
+#define HIGHMEM_DEBUG 1
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP 1024
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
+
+#define flush_cache_kmaps() do {} while (0)
+
+#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
+#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
+
+extern void kmap_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_CSKY_HIGHMEM_H */
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
new file mode 100644
index 0000000000..4725bb977b
--- /dev/null
+++ b/arch/csky/include/asm/io.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IO_H
+#define __ASM_CSKY_IO_H
+
+#include <linux/pgtable.h>
+#include <linux/types.h>
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ *
+ * For CACHEV1 (807, 810), store instructions can retire early ("fast
+ * retire"), so we need another mb() to prevent that.
+ *
+ * For CACHEV2 (860), store instructions to PAGE_ATTR_NO_BUFFERABLE pages
+ * won't fast retire.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
+#else
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
+#endif
+
+/*
+ * String version of I/O memory access operations.
+ */
+extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
+extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
+extern void __memset_io(volatile void __iomem *, int, size_t);
+
+#define memset_io(c,v,l) __memset_io((c),(v),(l))
+#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l))
+#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l))
+
+/*
+ * I/O memory mapping functions.
+ */
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), \
+ (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+
+#include <asm-generic/io.h>
+
+#endif /* __ASM_CSKY_IO_H */
diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
new file mode 100644
index 0000000000..33aaf39d6f
--- /dev/null
+++ b/arch/csky/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IRQ_WORK_H
+#define __ASM_CSKY_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return true;
+}
+extern void arch_irq_work_raise(void);
+#endif /* __ASM_CSKY_IRQ_WORK_H */
diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h
new file mode 100644
index 0000000000..9e3a569a55
--- /dev/null
+++ b/arch/csky/include/asm/irqflags.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IRQFLAGS_H
+#define __ASM_CSKY_IRQFLAGS_H
+#include <abi/reg_ops.h>
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = mfcr("psr");
+ asm volatile("psrclr ie\n":::"memory");
+ return flags;
+}
+#define arch_local_irq_save arch_local_irq_save
+
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile("psrset ee, ie\n":::"memory");
+}
+#define arch_local_irq_enable arch_local_irq_enable
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile("psrclr ie\n":::"memory");
+}
+#define arch_local_irq_disable arch_local_irq_disable
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return mfcr("psr");
+}
+#define arch_local_save_flags arch_local_save_flags
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ mtcr("psr", flags);
+}
+#define arch_local_irq_restore arch_local_irq_restore
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1<<6));
+}
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
+
+#include <asm-generic/irqflags.h>
+
+#endif /* __ASM_CSKY_IRQFLAGS_H */
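
These primitives back the generic local_irq_save()/local_irq_restore() API: saving reads the PSR and clears the interrupt-enable bit (bit 6) with psrclr, restoring simply writes the saved PSR back. Typical usage through the generic wrappers, shown with a trivial hypothetical critical section:

    #include <linux/irqflags.h>

    static int counter;

    static void counter_bump(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* mfcr "psr"; psrclr ie */
            counter++;                      /* no local IRQ can intervene */
            local_irq_restore(flags);       /* mtcr "psr", flags */
    }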
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
new file mode 100644
index 0000000000..98a3f4b168
--- /dev/null
+++ b/arch/csky/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_JUMP_LABEL_H
+#define __ASM_CSKY_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ "1: nop32 \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ "1: bsr32 %l[label] \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+enum jump_label_type;
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type);
+#define arch_jump_label_transform_static arch_jump_label_transform_static
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_JUMP_LABEL_H */
diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h
new file mode 100644
index 0000000000..55267cbf52
--- /dev/null
+++ b/arch/csky/include/asm/kprobes.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_KPROBES_H
+#define __ASM_CSKY_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_sr;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+int kprobe_breakpoint_handler(struct pt_regs *regs);
+int kprobe_single_step_handler(struct pt_regs *regs);
+void __kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_CSKY_KPROBES_H */
diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h
new file mode 100644
index 0000000000..d12179801a
--- /dev/null
+++ b/arch/csky/include/asm/memory.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MEMORY_H
+#define __ASM_CSKY_MEMORY_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#define FIXADDR_TOP _AC(0xffffc000, UL)
+#define PKMAP_BASE _AC(0xff800000, UL)
+#define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8))
+#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
+
+#ifdef CONFIG_HAVE_TCM
+#ifdef CONFIG_HAVE_DTCM
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
+#else
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES)
+#endif
+#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
+#endif
+
+#endif
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
new file mode 100644
index 0000000000..d78321901d
--- /dev/null
+++ b/arch/csky/include/asm/mmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MMU_H
+#define __ASM_CSKY_MMU_H
+
+typedef struct {
+ atomic64_t asid;
+ void *vdso;
+ cpumask_t icache_stale_mask;
+} mm_context_t;
+
+#endif /* __ASM_CSKY_MMU_H */
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
new file mode 100644
index 0000000000..95d99b3079
--- /dev/null
+++ b/arch/csky/include/asm/mmu_context.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MMU_CONTEXT_H
+#define __ASM_CSKY_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <abi/ckmmu.h>
+
+#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1)
+#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
+
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (prev != next)
+ check_and_switch_context(next, cpu);
+
+ setup_pgd(next->pgd, next->context.asid.counter);
+
+ flush_icache_deferred(next);
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
new file mode 100644
index 0000000000..4a0502e324
--- /dev/null
+++ b/arch/csky/include/asm/page.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PAGE_H
+#define __ASM_CSKY_PAGE_H
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size: 4KB
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define THREAD_SIZE (PAGE_SIZE * 2)
+#define THREAD_MASK (~(THREAD_SIZE - 1))
+#define THREAD_SHIFT (PAGE_SHIFT + 1)
+
+/*
+ * For C-SKY the "User-space:Kernel-space" split is "2GB:2GB", fixed by
+ * hardware, and there are two segment registers (MSA0 + MSA1) that each
+ * map a 512MB physical address region. We use them to map the kernel's
+ * 1GB direct-map area; for more than 1GB of memory we use highmem.
+ */
+#define PAGE_OFFSET CONFIG_PAGE_OFFSET
+#define SSEG_SIZE 0x20000000
+#define LOWMEM_LIMIT (SSEG_SIZE * 2)
+
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
+
+#ifndef __ASSEMBLY__
+
+#include <linux/pfn.h>
+
+#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
+ (void *)(kaddr) < high_memory)
+
+extern void *memset(void *dest, int c, size_t l);
+extern void *memcpy(void *to, const void *from, size_t l);
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr)))
+
+struct page;
+
+#include <abi/page.h>
+
+struct vm_area_struct;
+
+typedef struct { unsigned long pte_low; } pte_t;
+#define pte_val(x) ((x).pte_low)
+
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+extern unsigned long va_pa_offset;
+
+#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET)
+
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset))
+
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
+
+#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
+ PHYS_OFFSET_OFFSET)
+#define virt_to_page(x) (mem_map + MAP_NR(x))
+
+#define pfn_to_kaddr(x) __va(PFN_PHYS(x))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_CSKY_PAGE_H */
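
__pa()/__va() are plain offset arithmetic between the direct-map virtual window and physical memory: phys = virt - PAGE_OFFSET + va_pa_offset, and back. A user-space sketch with assumed values (PAGE_OFFSET 0x80000000 and DRAM at 0x40000000 are illustrative, not a statement about any particular board):

    #include <stdio.h>

    #define PAGE_OFFSET 0x80000000ul
    static unsigned long va_pa_offset = 0x40000000ul;  /* assumed */

    #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
    #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset))

    int main(void)
    {
            unsigned long virt = 0x80123000ul;   /* a direct-map address */
            unsigned long phys = __pa(virt);     /* 0x40123000 */

            printf("pa = %#lx, back to va = %p\n", phys, __va(phys));
            return 0;
    }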
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
new file mode 100644
index 0000000000..42724c630d
--- /dev/null
+++ b/arch/csky/include/asm/pci.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_PCI_H
+#define __ASM_CSKY_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h
new file mode 100644
index 0000000000..249905d8a4
--- /dev/null
+++ b/arch/csky/include/asm/perf_event.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PERF_EVENT_H
+#define __ASM_CSKY_PERF_EVENT_H
+
+#include <abi/regdef.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
+ asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
+}
+
+#endif /* __ASM_CSKY_PERF_EVENT_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
new file mode 100644
index 0000000000..9c84c9012e
--- /dev/null
+++ b/arch/csky/include/asm/pgalloc.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PGALLOC_H
+#define __ASM_CSKY_PGALLOC_H
+
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd(__pa(pte)));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd(__pa(page_address(pte))));
+}
+
+extern void pgd_init(unsigned long *p);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte;
+ unsigned long i;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+ (pte + i)->pte_low = _PAGE_GLOBAL;
+
+ return pte;
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret;
+ pgd_t *init;
+
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long *)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* prevent out-of-order execution */
+ smp_mb();
+#ifdef CONFIG_CPU_NEED_TLBSYNC
+ dcache_wb_range((unsigned int)ret,
+ (unsigned int)(ret + PTRS_PER_PGD));
+#endif
+ }
+
+ return ret;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pagetable_pte_dtor(page_ptdesc(pte)); \
+ tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
+} while (0)
+
+extern void pagetable_init(void);
+extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
+extern void pre_trap_init(void);
+
+#endif /* __ASM_CSKY_PGALLOC_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
new file mode 100644
index 0000000000..a397e1718a
--- /dev/null
+++ b/arch/csky/include/asm/pgtable.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PGTABLE_H
+#define __ASM_CSKY_PGTABLE_H
+
+#include <asm/fixmap.h>
+#include <asm/memory.h>
+#include <asm/addrspace.h>
+#include <abi/pgtable-bits.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)
+
+/*
+ * C-SKY uses a two-level paging structure:
+ */
+
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#define PFN_PTE_SHIFT PAGE_SHIFT
+#define pmd_pfn(pmd) (pmd_phys(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pte_clear(mm, addr, ptep) set_pte((ptep), \
+ (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot))
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \
+ pgprot_val(pgprot))
+
+/*
+ * C-SKY only has VALID and DIRTY bits in hardware, so we use these two
+ * bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and ACCESSED.
+ */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _CACHE_CACHED)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+#define PAGE_SHARED PAGE_WRITE
+
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
+ _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
+ _PAGE_GLOBAL | \
+ _CACHE_CACHED)
+
+#define _PAGE_IOREMAP (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
+ _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
+ _PAGE_GLOBAL | \
+ _CACHE_UNCACHED | _PAGE_SO)
+
+#define _PAGE_CHG_MASK (~(unsigned long) \
+ (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_MASK | _PAGE_GLOBAL))
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
+
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern void load_pgd(unsigned long pg_dir);
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+static inline void set_pte(pte_t *p, pte_t pte)
+{
+ *p = pte;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent out-of-order execution */
+ smp_mb();
+}
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long ptr;
+
+ ptr = pmd_val(pmd);
+
+ return __va(ptr);
+}
+
+#define pmd_phys(pmd) pmd_val(pmd)
+
+static inline void set_pmd(pmd_t *p, pmd_t pmd)
+{
+ *p = pmd;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent speculative execution */
+ smp_mb();
+}
+
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == __pa(invalid_pte_table);
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) != __pa(invalid_pte_table));
+}
+
+static inline void pmd_clear(pmd_t *p)
+{
+ pmd_val(*p) = (__pa(invalid_pte_table));
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not.
+ */
+static inline int pte_read(pte_t pte)
+{
+ return pte.pte_low & _PAGE_READ;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_WRITE;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_MODIFIED;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_ACCESSED;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_MODIFIED;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) |= _PAGE_VALID;
+ return pte;
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+
+/*
+ * Macro to mark a page protection value as "uncacheable". Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+ return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot)));
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#endif /* __ASM_CSKY_PGTABLE_H */
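
The mkdirty/mkyoung helpers implement the two-bit emulation described above: the software MODIFIED/ACCESSED bits are always recorded, but the hardware DIRTY/VALID bits are only set once the matching permission bit agrees, so the first hardware access to a page still faults and lets the kernel do its accounting. A stand-alone sketch of the pte_mkdirty() transition; the bit values are assumed for illustration (the real ones live in abi/pgtable-bits.h):

    #include <stdio.h>

    /* assumed bit layout, for this demo only */
    #define _PAGE_DIRTY     (1ul << 1)  /* hardware: write permitted */
    #define _PAGE_MODIFIED  (1ul << 3)  /* software: page was written */
    #define _PAGE_READ      (1ul << 4)
    #define _PAGE_WRITE     (1ul << 5)

    static unsigned long pte_mkdirty_sketch(unsigned long pte)
    {
            pte |= _PAGE_MODIFIED;          /* software bit, always */
            if (pte & _PAGE_WRITE)          /* hardware bit only if writable */
                    pte |= _PAGE_DIRTY;
            return pte;
    }

    int main(void)
    {
            /* read-only pte: MODIFIED recorded, no hardware DIRTY */
            printf("ro: %#lx\n", pte_mkdirty_sketch(_PAGE_READ));
            /* writable pte: both bits set, no further write faults */
            printf("rw: %#lx\n", pte_mkdirty_sketch(_PAGE_READ | _PAGE_WRITE));
            return 0;
    }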
diff --git a/arch/csky/include/asm/probes.h b/arch/csky/include/asm/probes.h
new file mode 100644
index 0000000000..5e526334e6
--- /dev/null
+++ b/arch/csky/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PROBES_H
+#define __ASM_CSKY_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* __ASM_CSKY_PROBES_H */
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
new file mode 100644
index 0000000000..e487a46d1c
--- /dev/null
+++ b/arch/csky/include/asm/processor.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PROCESSOR_H
+#define __ASM_CSKY_PROCESSOR_H
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <abi/reg_ops.h>
+#include <abi/regdef.h>
+#include <abi/switch_context.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+struct cpuinfo_csky {
+ unsigned long asid_cache;
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_csky cpu_data[];
+
+/*
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8))
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+struct thread_struct {
+ unsigned long sp; /* kernel stack pointer */
+ unsigned long trap_no; /* saved status register */
+
+ /* FPU regs */
+ struct user_fp __aligned(16) user_fp;
+};
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (unsigned long) &init_stack, \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * Pass the data segment into user programs if it exists;
+ * it can't hurt anything as far as I can tell.
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ (_regs)->pc = (_pc); \
+ (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \
+ (_regs)->regs[2] = 0; \
+ (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */ \
+ (_regs)->sr &= ~PS_S; \
+ (_regs)->usp = (_usp); \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Prepare to copy thread state - unlazy all lazy state */
+#define prepare_to_copy(tsk) do { } while (0)
+
+unsigned long __get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
+
+#define cpu_relax() barrier()
+
+register unsigned long current_stack_pointer __asm__("sp");
+
+#endif /* __ASM_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
new file mode 100644
index 0000000000..0634b7895d
--- /dev/null
+++ b/arch/csky/include/asm/ptrace.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PTRACE_H
+#define __ASM_CSKY_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+#define PS_S 0x80000000 /* Supervisor Mode */
+
+#define USR_BKPT 0x1464
+
+#define arch_has_single_step() (1)
+#define current_pt_regs() \
+({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
+
+#define user_stack_pointer(regs) ((regs)->usp)
+
+#define user_mode(regs) (!((regs)->sr & PS_S))
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+#define trap_no(regs) (((regs)->sr >> 16) & 0xff)
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->pc = val;
+}
+
+#if defined(__CSKYABIV2__)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr)
+#else
+#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9])
+#endif
+
+static inline bool in_syscall(struct pt_regs const *regs)
+{
+ return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
+}
+
+static inline void forget_syscall(struct pt_regs *regs)
+{
+ regs->sr &= ~(0xff << 16);
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->a0 = val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->usp;
+}
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[4] = val;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/*
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is read
+ * @offset: offset of the register within struct pt_regs
+ *
+ * regs_get_register() returns the value of the register located
+ * @offset bytes into the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
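A hedged example of pairing this with regs_query_register_offset();
"a0" is assumed to be a register name the csky implementation accepts:

/* Sketch: fetch a register by name, e.g. from a kprobe pre-handler. */
static unsigned long demo_read_a0(struct pt_regs *regs)
{
	int offset = regs_query_register_offset("a0");

	return offset >= 0 ? regs_get_register(regs, offset) : 0;
}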
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_exit(struct pt_regs *regs);
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_PTRACE_H */
diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h
new file mode 100644
index 0000000000..cccf7d525f
--- /dev/null
+++ b/arch/csky/include/asm/reg_ops.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_REGS_OPS_H
+#define __ASM_REGS_OPS_H
+
+#define mfcr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile( \
+ "mfcr %0, "reg"\n" \
+ : "=r"(tmp) \
+ : \
+ : "memory"); \
+ tmp; \
+})
+
+#define mtcr(reg, val) \
+({ \
+ asm volatile( \
+ "mtcr %0, "reg"\n" \
+ : \
+ : "r"(val) \
+ : "memory"); \
+})
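A usage sketch under the assumption that "cr0" (the processor status
register) is a valid control-register name here; PS_S is the supervisor
bit from asm/ptrace.h:

/* Sketch: read-modify-write a control register. */
static inline void demo_set_supervisor(void)
{
	unsigned int psr = mfcr("cr0");

	mtcr("cr0", psr | PS_S);
}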
+
+#endif /* __ASM_REGS_OPS_H */
diff --git a/arch/csky/include/asm/seccomp.h b/arch/csky/include/asm/seccomp.h
new file mode 100644
index 0000000000..d33e758126
--- /dev/null
+++ b/arch/csky/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_CSKY
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "csky"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h
new file mode 100644
index 0000000000..83e82b7c0f
--- /dev/null
+++ b/arch/csky/include/asm/sections.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+
+asmlinkage void csky_start(unsigned int unused, void *dtb_start);
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h
new file mode 100644
index 0000000000..2fe6cea0da
--- /dev/null
+++ b/arch/csky/include/asm/shmparam.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SHMPARAM_H
+#define __ASM_CSKY_SHMPARAM_H
+
+#define SHMLBA (4 * PAGE_SIZE)
+
+#define __ARCH_FORCE_SHMLBA
+
+#endif /* __ASM_CSKY_SHMPARAM_H */
diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h
new file mode 100644
index 0000000000..d3db334f31
--- /dev/null
+++ b/arch/csky/include/asm/smp.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SMP_H
+#define __ASM_CSKY_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+void __init setup_smp(void);
+
+void __init setup_smp_ipi(void);
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+void arch_send_call_function_single_ipi(int cpu);
+
+void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu) { }
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_CSKY_SMP_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 0000000000..83a2005341
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 0000000000..75bdf3af80
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h
new file mode 100644
index 0000000000..d237474471
--- /dev/null
+++ b/arch/csky/include/asm/stackprotector.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary = get_random_canary();
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h
new file mode 100644
index 0000000000..a0d81e9d6b
--- /dev/null
+++ b/arch/csky/include/asm/string.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CSKY_STRING_MM_H_
+#define _CSKY_STRING_MM_H_
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <abi/string.h>
+#endif
+
+#endif /* _CSKY_STRING_MM_H_ */
diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h
new file mode 100644
index 0000000000..731e466415
--- /dev/null
+++ b/arch/csky/include/asm/switch_to.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SWITCH_TO_H
+#define __ASM_CSKY_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ save_to_user_fp(&prev->thread.user_fp);
+ restore_from_user_fp(&next->thread.user_fp);
+}
+#else
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{}
+#endif
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ __switch_to_fpu(__prev, __next); \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_CSKY_SWITCH_TO_H */
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
new file mode 100644
index 0000000000..0de5734950
--- /dev/null
+++ b/arch/csky/include/asm/syscall.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SYSCALL_H
+#define __ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <abi/regdef.h>
+#include <uapi/linux/audit.h>
+
+extern void *sys_call_table[];
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs_syscallid(regs);
+}
+
+static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
+ int sysno)
+{
+ regs_syscallid(regs) = sysno;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+}
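A hedged sketch of how generic tracing code consumes these accessors;
task_pt_regs() and current are the usual route to a task's registers:

/* Sketch: dump the syscall number and first argument of current. */
static void demo_dump_syscall(void)
{
	struct pt_regs *regs = task_pt_regs(current);
	unsigned long args[6];

	syscall_get_arguments(current, regs, args);
	pr_info("syscall %d, arg0 = %lx\n",
		syscall_get_nr(current, regs), args[0]);
}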
+
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_CSKY;
+}
+
+#endif /* __ASM_SYSCALL_H */
diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h
new file mode 100644
index 0000000000..ea9ce6138b
--- /dev/null
+++ b/arch/csky/include/asm/syscalls.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SYSCALLS_H
+#define __ASM_CSKY_SYSCALLS_H
+
+#include <asm-generic/syscalls.h>
+
+long sys_cacheflush(void __user *, unsigned long, int);
+
+long sys_set_thread_area(unsigned long addr);
+
+long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len);
+
+#endif /* __ASM_CSKY_SYSCALLS_H */
diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h
new file mode 100644
index 0000000000..bd1e662ecd
--- /dev/null
+++ b/arch/csky/include/asm/tcm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TCM_H
+#define __ASM_CSKY_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(".tcm.data")
+/* Tag constants with this */
+#define __tcmconst __section(".tcm.rodata")
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __section(".tcm.text") noinline
+/* Tag functions inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(".tcm.text")
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
new file mode 100644
index 0000000000..b5ed788f0c
--- /dev/null
+++ b/arch/csky/include/asm/thread_info.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_CSKY_THREAD_INFO_H
+#define _ASM_CSKY_THREAD_INFO_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <abi/switch_context.h>
+
+struct thread_info {
+ struct task_struct *task;
+ void *dump_exec_domain;
+ unsigned long flags;
+ int preempt_count;
+ unsigned long tp_value;
+ struct restart_block restart_block;
+ struct pt_regs *regs;
+ unsigned int cpu;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .cpu = 0, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8))
+
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(tsk->thread.sp))
+
+#define thread_saved_lr(tsk) \
+ ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15))
+
+static inline struct thread_info *current_thread_info(void)
+{
+ unsigned long sp;
+
+ asm volatile("mov %0, sp\n":"=r"(sp));
+
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
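The masking trick works because thread stacks are THREAD_SIZE-aligned;
a worked example assuming THREAD_SIZE = 0x2000 (8 KiB):

/*
 * With sp = 0xc0013e70:  sp & ~(0x2000 - 1) = 0xc0012000, the base of
 * the stack, which is exactly where struct thread_info sits.
 */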
+
+#endif /* !__ASSEMBLY__ */
+
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_UPROBE 3 /* uprobe breakpoint or singlestep */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 6 /* syscall auditing */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
+#define TIF_SECCOMP 21 /* secure computing */
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
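A hedged sketch of how an entry path might gate on these masks (the
function name is illustrative, not the actual csky entry code):

/* Sketch: take the slow path only when syscall tracing work exists. */
static void demo_syscall_entry(struct pt_regs *regs)
{
	if (current_thread_info()->flags & _TIF_SYSCALL_WORK)
		syscall_trace_enter(regs);
}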
+
+#endif /* _ASM_CSKY_THREAD_INFO_H */
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
new file mode 100644
index 0000000000..702861c688
--- /dev/null
+++ b/arch/csky/include/asm/tlb.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TLB_H
+#define __ASM_CSKY_TLB_H
+
+#include <asm/cacheflush.h>
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_CSKY_TLB_H */
diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h
new file mode 100644
index 0000000000..407160b4fd
--- /dev/null
+++ b/arch/csky/include/asm/tlbflush.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void flush_tlb_one(unsigned long vaddr);
+
+#endif
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
new file mode 100644
index 0000000000..732c4aaa2e
--- /dev/null
+++ b/arch/csky/include/asm/traps.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TRAPS_H
+#define __ASM_CSKY_TRAPS_H
+
+#include <linux/linkage.h>
+
+#define VEC_RESET 0
+#define VEC_ALIGN 1
+#define VEC_ACCESS 2
+#define VEC_ZERODIV 3
+#define VEC_ILLEGAL 4
+#define VEC_PRIV 5
+#define VEC_TRACE 6
+#define VEC_BREAKPOINT 7
+#define VEC_UNRECOVER 8
+#define VEC_SOFTRESET 9
+#define VEC_AUTOVEC 10
+#define VEC_FAUTOVEC 11
+#define VEC_HWACCEL 12
+
+#define VEC_TLBMISS 14
+#define VEC_TLBMODIFIED 15
+
+#define VEC_TRAP0 16
+#define VEC_TRAP1 17
+#define VEC_TRAP2 18
+#define VEC_TRAP3 19
+
+#define VEC_TLBINVALIDL 20
+#define VEC_TLBINVALIDS 21
+
+#define VEC_PRFL 29
+#define VEC_FPE 30
+
+extern void *vec_base[];
+
+#define VEC_INIT(i, func) \
+do { \
+ vec_base[i] = (void *)func; \
+} while (0)
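A usage sketch; handle_tlb_miss is a hypothetical handler installed
during early trap setup:

/* Sketch: point a vector at its handler. */
VEC_INIT(VEC_TLBMISS, handle_tlb_miss);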
+
+void csky_alignment(struct pt_regs *regs);
+
+asmlinkage void do_trap_unknown(struct pt_regs *regs);
+asmlinkage void do_trap_zdiv(struct pt_regs *regs);
+asmlinkage void do_trap_buserr(struct pt_regs *regs);
+asmlinkage void do_trap_misaligned(struct pt_regs *regs);
+asmlinkage void do_trap_bkpt(struct pt_regs *regs);
+asmlinkage void do_trap_illinsn(struct pt_regs *regs);
+asmlinkage void do_trap_fpe(struct pt_regs *regs);
+asmlinkage void do_trap_priv(struct pt_regs *regs);
+asmlinkage void trap_c(struct pt_regs *regs);
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags);
+
+void trap_init(void);
+
+#endif /* __ASM_CSKY_TRAPS_H */
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
new file mode 100644
index 0000000000..2e927c21d8
--- /dev/null
+++ b/arch/csky/include/asm/uaccess.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_UACCESS_H
+#define __ASM_CSKY_UACCESS_H
+
+/*
+ * __put_user_fn
+ */
+extern int __put_user_bad(void);
+
+#define __put_user_asm_b(x, ptr, err) \
+do { \
+ int errcode; \
+ __asm__ __volatile__( \
+ "1: stb %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_h(x, ptr, err) \
+do { \
+ int errcode; \
+ __asm__ __volatile__( \
+ "1: sth %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_w(x, ptr, err) \
+do { \
+ int errcode; \
+ __asm__ __volatile__( \
+ "1: stw %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_64(x, ptr, err) \
+do { \
+ int tmp; \
+ int errcode; \
+ \
+ __asm__ __volatile__( \
+ " ldw %3, (%1, 0) \n" \
+ "1: stw %3, (%2, 0) \n" \
+ " ldw %3, (%1, 4) \n" \
+ "2: stw %3, (%2, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(0), \
+ "4"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+ int retval = 0;
+ u32 tmp;
+
+ switch (size) {
+ case 1:
+ tmp = *(u8 *)x;
+ __put_user_asm_b(tmp, ptr, retval);
+ break;
+ case 2:
+ tmp = *(u16 *)x;
+ __put_user_asm_h(tmp, ptr, retval);
+ break;
+ case 4:
+ tmp = *(u32 *)x;
+ __put_user_asm_w(tmp, ptr, retval);
+ break;
+ case 8:
+ __put_user_asm_64(x, (u64 *)ptr, retval);
+ break;
+ }
+
+ return retval;
+}
+#define __put_user_fn __put_user_fn
+
+/*
+ * __get_user_fn
+ */
+extern int __get_user_bad(void);
+
+#define __get_user_asm_common(x, ptr, ins, err) \
+do { \
+ int errcode; \
+ __asm__ __volatile__( \
+ "1: " ins " %1, (%4, 0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %2 \n" \
+ " movi %1, 0 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(errcode) \
+ : "0"(0), "r"(ptr), "2"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __get_user_asm_64(x, ptr, err) \
+do { \
+ int tmp; \
+ int errcode; \
+ \
+ __asm__ __volatile__( \
+ "1: ldw %3, (%2, 0) \n" \
+ " stw %3, (%1, 0) \n" \
+ "2: ldw %3, (%2, 4) \n" \
+ " stw %3, (%1, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(0), \
+ "4"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ int retval;
+ u32 tmp;
+
+ switch (size) {
+ case 1:
+ __get_user_asm_common(tmp, ptr, "ldb", retval);
+ *(u8 *)x = (u8)tmp;
+ break;
+ case 2:
+ __get_user_asm_common(tmp, ptr, "ldh", retval);
+ *(u16 *)x = (u16)tmp;
+ break;
+ case 4:
+ __get_user_asm_common(tmp, ptr, "ldw", retval);
+ *(u32 *)x = (u32)tmp;
+ break;
+ case 8:
+ __get_user_asm_64(x, ptr, retval);
+ break;
+ }
+
+ return retval;
+}
+#define __get_user_fn __get_user_fn
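Together these two helpers back the generic put_user()/get_user() pulled
in from asm-generic/uaccess.h below; a minimal caller sketch:

/* Sketch: round-trip one u32 through userspace with fault handling. */
static long demo_roundtrip(u32 __user *uptr)
{
	u32 val;

	if (get_user(val, uptr))
		return -EFAULT;

	return put_user(val + 1, uptr);
}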
+
+unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
+unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
+
+unsigned long __clear_user(void __user *to, unsigned long n);
+#define __clear_user __clear_user
+
+#include <asm-generic/uaccess.h>
+
+#endif /* __ASM_CSKY_UACCESS_H */
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
new file mode 100644
index 0000000000..9cf97de9a2
--- /dev/null
+++ b/arch/csky/include/asm/unistd.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/asm/uprobes.h b/arch/csky/include/asm/uprobes.h
new file mode 100644
index 0000000000..600388eb93
--- /dev/null
+++ b/arch/csky/include/asm/uprobes.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_UPROBES_H
+#define __ASM_CSKY_UPROBES_H
+
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES 4
+
+#define UPROBE_SWBP_INSN USR_BKPT
+#define UPROBE_SWBP_INSN_SIZE 2
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+int uprobe_breakpoint_handler(struct pt_regs *regs);
+int uprobe_single_step_handler(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_UPROBES_H */
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
new file mode 100644
index 0000000000..bdce581b5f
--- /dev/null
+++ b/arch/csky/include/asm/vdso.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_VDSO_H
+#define __ASM_CSKY_VDSO_H
+
+#include <linux/types.h>
+
+#ifndef GENERIC_TIME_VSYSCALL
+struct vdso_data {
+};
+#endif
+
+/*
+ * The VDSO symbols are mapped into Linux so we can just use regular symbol
+ * addressing to get their offsets in userspace. The symbols are mapped at an
+ * offset of 0, but since the linker must support setting weak undefined
+ * symbols to the absolute address 0 it also happens to support other low
+ * addresses even when the code model suggests those low addresses would not
+ * otherwise be available.
+ */
+#define VDSO_SYMBOL(base, name) \
+({ \
+ extern const char __vdso_##name[]; \
+ (void __user *)((unsigned long)(base) + __vdso_##name); \
+})
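A usage sketch; vdso_base stands in for wherever the kernel recorded the
vDSO mapping address, and rt_sigreturn is assumed to be one of the
exported __vdso_* symbols:

/* Sketch: resolve a vDSO entry point, e.g. for signal frame setup. */
void __user *restorer = VDSO_SYMBOL(vdso_base, rt_sigreturn);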
+
+#endif /* __ASM_CSKY_VDSO_H */
diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h
new file mode 100644
index 0000000000..dfca7b4724
--- /dev/null
+++ b/arch/csky/include/asm/vdso/clocksource.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
+#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */
diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h
new file mode 100644
index 0000000000..6c4f144694
--- /dev/null
+++ b/arch/csky/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H
+#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <abi/regdef.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_gettimeofday;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_gettime64;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_gettime;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_getres_time64;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_getres;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+uint64_t csky_pmu_read_cc(void);
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+#ifdef CONFIG_CSKY_PMU_V1
+ return csky_pmu_read_cc();
+#else
+ return 0;
+#endif
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */
diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h
new file mode 100644
index 0000000000..39a6b561d0
--- /dev/null
+++ b/arch/csky/include/asm/vdso/processor.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_VDSO_CSKY_PROCESSOR_H
+#define __ASM_VDSO_CSKY_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define cpu_relax() barrier()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h
new file mode 100644
index 0000000000..c276211a7c
--- /dev/null
+++ b/arch/csky/include/asm/vdso/vsyscall.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_VSYSCALL_H
+#define __ASM_VDSO_CSKY_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+static __always_inline struct vdso_data *__csky_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __csky_get_k_vdso_data
+
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */
diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h
new file mode 100644
index 0000000000..43dca6336b
--- /dev/null
+++ b/arch/csky/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_CSKY_VMALLOC_H
+#define _ASM_CSKY_VMALLOC_H
+
+#endif /* _ASM_CSKY_VMALLOC_H */
diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild
new file mode 100644
index 0000000000..e784701419
--- /dev/null
+++ b/arch/csky/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += ucontext.h
diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000000..1aedd513b6
--- /dev/null
+++ b/arch/csky/include/uapi/asm/byteorder.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __ASM_CSKY_BYTEORDER_H
+#define __ASM_CSKY_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* __ASM_CSKY_BYTEORDER_H */
diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h
new file mode 100644
index 0000000000..ed7fad1ea2
--- /dev/null
+++ b/arch/csky/include/uapi/asm/cachectl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __ASM_CSKY_CACHECTL_H
+#define __ASM_CSKY_CACHECTL_H
+
+/*
+ * See "man cacheflush"
+ */
+#define ICACHE (1<<0)
+#define DCACHE (1<<1)
+#define BCACHE (ICACHE|DCACHE)
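From userspace these flags travel through the arch-specific
sys_cacheflush() (its syscall number is added in uapi/asm/unistd.h at
the end of this patch); a hedged userspace sketch:

/* Userspace sketch: make freshly written JIT code executable. */
#include <unistd.h>
#include <sys/syscall.h>

static int flush_jit(void *buf, unsigned long len)
{
	return syscall(__NR_cacheflush, buf, len, BCACHE);
}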
+
+#endif /* __ASM_CSKY_CACHECTL_H */
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000000..d0a8ac6a1b
--- /dev/null
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _ASM_CSKY_PERF_REGS_H
+#define _ASM_CSKY_PERF_REGS_H
+
+/* Index of struct pt_regs */
+enum perf_event_csky_regs {
+ PERF_REG_CSKY_TLS,
+ PERF_REG_CSKY_LR,
+ PERF_REG_CSKY_PC,
+ PERF_REG_CSKY_SR,
+ PERF_REG_CSKY_SP,
+ PERF_REG_CSKY_ORIG_A0,
+ PERF_REG_CSKY_A0,
+ PERF_REG_CSKY_A1,
+ PERF_REG_CSKY_A2,
+ PERF_REG_CSKY_A3,
+ PERF_REG_CSKY_REGS0,
+ PERF_REG_CSKY_REGS1,
+ PERF_REG_CSKY_REGS2,
+ PERF_REG_CSKY_REGS3,
+ PERF_REG_CSKY_REGS4,
+ PERF_REG_CSKY_REGS5,
+ PERF_REG_CSKY_REGS6,
+ PERF_REG_CSKY_REGS7,
+ PERF_REG_CSKY_REGS8,
+ PERF_REG_CSKY_REGS9,
+#if defined(__CSKYABIV2__)
+ PERF_REG_CSKY_EXREGS0,
+ PERF_REG_CSKY_EXREGS1,
+ PERF_REG_CSKY_EXREGS2,
+ PERF_REG_CSKY_EXREGS3,
+ PERF_REG_CSKY_EXREGS4,
+ PERF_REG_CSKY_EXREGS5,
+ PERF_REG_CSKY_EXREGS6,
+ PERF_REG_CSKY_EXREGS7,
+ PERF_REG_CSKY_EXREGS8,
+ PERF_REG_CSKY_EXREGS9,
+ PERF_REG_CSKY_EXREGS10,
+ PERF_REG_CSKY_EXREGS11,
+ PERF_REG_CSKY_EXREGS12,
+ PERF_REG_CSKY_EXREGS13,
+ PERF_REG_CSKY_EXREGS14,
+ PERF_REG_CSKY_HI,
+ PERF_REG_CSKY_LO,
+ PERF_REG_CSKY_DCSR,
+#endif
+ PERF_REG_CSKY_MAX,
+};
+#endif /* _ASM_CSKY_PERF_REGS_H */
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000000..3be9c14334
--- /dev/null
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _CSKY_PTRACE_H
+#define _CSKY_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long tls;
+ unsigned long lr;
+ unsigned long pc;
+ unsigned long sr;
+ unsigned long usp;
+
+ /*
+ * a0, a1, a2, a3:
+ * abiv1: r2, r3, r4, r5
+ * abiv2: r0, r1, r2, r3
+ */
+ unsigned long orig_a0;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+
+ /*
+ * ABIV2: r4 ~ r13
+ * ABIV1: r6 ~ r14, r1
+ */
+ unsigned long regs[10];
+
+#if defined(__CSKYABIV2__)
+ /* r16 ~ r30 */
+ unsigned long exregs[15];
+
+ unsigned long rhi;
+ unsigned long rlo;
+ unsigned long dcsr;
+#endif
+};
+
+struct user_fp {
+ unsigned long vr[96];
+ unsigned long fcr;
+ unsigned long fesr;
+ unsigned long fid;
+ unsigned long reserved;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _CSKY_PTRACE_H */
diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000000..859afb6024
--- /dev/null
+++ b/arch/csky/include/uapi/asm/sigcontext.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __ASM_CSKY_SIGCONTEXT_H
+#define __ASM_CSKY_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+struct sigcontext {
+ struct pt_regs sc_pt_regs;
+ struct user_fp sc_user_fp;
+};
+
+#endif /* __ASM_CSKY_SIGCONTEXT_H */
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
new file mode 100644
index 0000000000..7ff6a2466a
--- /dev/null
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
+#include <asm-generic/unistd.h>
+
+#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
+__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
+#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)