author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/csky/include
parent    Initial commit. (diff)
download  linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
          linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip

Adding upstream version 5.10.209. (upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/csky/include')
-rw-r--r--  arch/csky/include/asm/Kbuild | 8
-rw-r--r--  arch/csky/include/asm/addrspace.h | 10
-rw-r--r--  arch/csky/include/asm/asid.h | 78
-rw-r--r--  arch/csky/include/asm/atomic.h | 212
-rw-r--r--  arch/csky/include/asm/barrier.h | 48
-rw-r--r--  arch/csky/include/asm/bitops.h | 82
-rw-r--r--  arch/csky/include/asm/bug.h | 27
-rw-r--r--  arch/csky/include/asm/cache.h | 32
-rw-r--r--  arch/csky/include/asm/cacheflush.h | 10
-rw-r--r--  arch/csky/include/asm/checksum.h | 50
-rw-r--r--  arch/csky/include/asm/cmpxchg.h | 73
-rw-r--r--  arch/csky/include/asm/elf.h | 92
-rw-r--r--  arch/csky/include/asm/fixmap.h | 34
-rw-r--r--  arch/csky/include/asm/ftrace.h | 31
-rw-r--r--  arch/csky/include/asm/highmem.h | 43
-rw-r--r--  arch/csky/include/asm/io.h | 45
-rw-r--r--  arch/csky/include/asm/irq_work.h | 11
-rw-r--r--  arch/csky/include/asm/irqflags.h | 49
-rw-r--r--  arch/csky/include/asm/kprobes.h | 48
-rw-r--r--  arch/csky/include/asm/memory.h | 25
-rw-r--r--  arch/csky/include/asm/mmu.h | 13
-rw-r--r--  arch/csky/include/asm/mmu_context.h | 49
-rw-r--r--  arch/csky/include/asm/page.h | 94
-rw-r--r--  arch/csky/include/asm/pci.h | 34
-rw-r--r--  arch/csky/include/asm/perf_event.h | 15
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 77
-rw-r--r--  arch/csky/include/asm/pgtable.h | 278
-rw-r--r--  arch/csky/include/asm/probes.h | 24
-rw-r--r--  arch/csky/include/asm/processor.h | 95
-rw-r--r--  arch/csky/include/asm/ptrace.h | 101
-rw-r--r--  arch/csky/include/asm/reg_ops.h | 26
-rw-r--r--  arch/csky/include/asm/segment.h | 18
-rw-r--r--  arch/csky/include/asm/shmparam.h | 11
-rw-r--r--  arch/csky/include/asm/smp.h | 30
-rw-r--r--  arch/csky/include/asm/spinlock.h | 256
-rw-r--r--  arch/csky/include/asm/spinlock_types.h | 37
-rw-r--r--  arch/csky/include/asm/stackprotector.h | 29
-rw-r--r--  arch/csky/include/asm/string.h | 13
-rw-r--r--  arch/csky/include/asm/switch_to.h | 36
-rw-r--r--  arch/csky/include/asm/syscall.h | 77
-rw-r--r--  arch/csky/include/asm/syscalls.h | 15
-rw-r--r--  arch/csky/include/asm/tcm.h | 24
-rw-r--r--  arch/csky/include/asm/thread_info.h | 93
-rw-r--r--  arch/csky/include/asm/tlb.h | 25
-rw-r--r--  arch/csky/include/asm/tlbflush.h | 25
-rw-r--r--  arch/csky/include/asm/traps.h | 44
-rw-r--r--  arch/csky/include/asm/uaccess.h | 410
-rw-r--r--  arch/csky/include/asm/unistd.h | 6
-rw-r--r--  arch/csky/include/asm/uprobes.h | 33
-rw-r--r--  arch/csky/include/asm/vdso.h | 12
-rw-r--r--  arch/csky/include/asm/vmalloc.h | 4
-rw-r--r--  arch/csky/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/csky/include/uapi/asm/byteorder.h | 9
-rw-r--r--  arch/csky/include/uapi/asm/cachectl.h | 13
-rw-r--r--  arch/csky/include/uapi/asm/perf_regs.h | 51
-rw-r--r--  arch/csky/include/uapi/asm/ptrace.h | 52
-rw-r--r--  arch/csky/include/uapi/asm/sigcontext.h | 14
-rw-r--r--  arch/csky/include/uapi/asm/unistd.h | 15
58 files changed, 3168 insertions, 0 deletions
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
new file mode 100644
index 000000000..2a5a4d94f
--- /dev/null
+++ b/arch/csky/include/asm/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += asm-offsets.h
+generic-y += gpio.h
+generic-y += kvm_para.h
+generic-y += qrwlock.h
+generic-y += seccomp.h
+generic-y += user.h
+generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h
new file mode 100644
index 000000000..d1c2ede69
--- /dev/null
+++ b/arch/csky/include/asm/addrspace.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_ADDRSPACE_H
+#define __ASM_CSKY_ADDRSPACE_H
+
+#define KSEG0 0x80000000ul
+#define KSEG0ADDR(a) (((unsigned long)a & 0x1fffffff) | KSEG0)
+
+#endif /* __ASM_CSKY_ADDRSPACE_H */
diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h
new file mode 100644
index 000000000..ac08b0ffb
--- /dev/null
+++ b/arch/csky/include/asm/asid.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ASM_ASID_H
+#define __ASM_ASM_ASID_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+struct asid_info
+{
+ atomic64_t generation;
+ unsigned long *map;
+ atomic64_t __percpu *active;
+ u64 __percpu *reserved;
+ u32 bits;
+ /* Lock protecting the structure */
+ raw_spinlock_t lock;
+ /* Which CPU requires context flush on next call */
+ cpumask_t flush_pending;
+ /* Number of ASID allocated by context (shift value) */
+ unsigned int ctxt_shift;
+ /* Callback to locally flush the context. */
+ void (*flush_cpu_ctxt_cb)(void);
+};
+
+#define NUM_ASIDS(info) (1UL << ((info)->bits))
+#define NUM_CTXT_ASIDS(info) (NUM_ASIDS(info) >> (info)->ctxt_shift)
+
+#define active_asid(info, cpu) *per_cpu_ptr((info)->active, cpu)
+
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu, struct mm_struct *mm);
+
+/*
+ * Check that the ASID is still valid for the context. If not, generate a new ASID.
+ *
+ * @pasid: Pointer to the current ASID batch
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+static inline void asid_check_context(struct asid_info *info,
+ atomic64_t *pasid, unsigned int cpu,
+ struct mm_struct *mm)
+{
+ u64 asid, old_active_asid;
+
+ asid = atomic64_read(pasid);
+
+ /*
+ * The memory ordering here is subtle.
+ * If our active_asid is non-zero and the ASID matches the current
+ * generation, then we update the active_asid entry with a relaxed
+ * cmpxchg. Racing with a concurrent rollover means that either:
+ *
+ * - We get a zero back from the cmpxchg and end up waiting on the
+ * lock. Taking the lock synchronises with the rollover and so
+ * we are forced to see the updated generation.
+ *
+ * - We get a valid ASID back from the cmpxchg, which means the
+ * relaxed xchg in flush_context will treat us as reserved
+ * because atomic RmWs are totally ordered for a given location.
+ */
+ old_active_asid = atomic64_read(&active_asid(info, cpu));
+ if (old_active_asid &&
+ !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
+ atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
+ old_active_asid, asid))
+ return;
+
+ asid_new_context(info, pasid, cpu, mm);
+}
+
+int asid_allocator_init(struct asid_info *info,
+ u32 bits, unsigned int asid_per_ctxt,
+ void (*flush_cpu_ctxt_cb)(void));
+
+#endif
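
The ASID allocator above is generic: an architecture supplies the ASID width, the number of ASIDs reserved per context, and a callback that flushes the local context, then calls asid_check_context() on every context switch. A minimal sketch of that wiring, assuming a hypothetical cpu_asid_info instance and a local_tlb_flush_all() helper that are not part of this patch:

/* Hypothetical glue code, not part of the patch above -- illustration only. */
static struct asid_info cpu_asid_info;

static void flush_cpu_ctxt(void)
{
	local_tlb_flush_all();		/* assumed arch-specific local TLB flush */
}

static int __init asids_init(void)
{
	/* 12-bit ASIDs, one ASID per context, local flush callback above */
	return asid_allocator_init(&cpu_asid_info, 12, 1, flush_cpu_ctxt);
}

static void example_switch_mm(struct mm_struct *next, unsigned int cpu)
{
	/* Revalidate or allocate next's ASID before installing it in the MMU */
	asid_check_context(&cpu_asid_info, &next->context.asid, cpu, next);
}
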
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
new file mode 100644
index 000000000..e369d73b1
--- /dev/null
+++ b/arch/csky/include/asm/atomic.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ATOMIC_H
+#define __ASM_CSKY_ATOMIC_H
+
+#include <linux/version.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ unsigned long tmp, ret;
+
+ smp_mb();
+
+ asm volatile (
+ "1: ldex.w %0, (%3) \n"
+ " mov %1, %0 \n"
+ " cmpne %0, %4 \n"
+ " bf 2f \n"
+ " add %0, %2 \n"
+ " stex.w %0, (%3) \n"
+ " bez %0, 1b \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (a), "r"(&v->counter), "r"(u)
+ : "memory");
+
+ if (ret != u)
+ smp_mb();
+
+ return ret;
+}
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ \
+ asm volatile ( \
+ "1: ldex.w %0, (%2) \n" \
+ " " #op " %0, %1 \n" \
+ " stex.w %0, (%2) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret; \
+ \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " " #op " %0, %2 \n" \
+ " mov %1, %0 \n" \
+ " stex.w %0, (%3) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ smp_mb(); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret; \
+ \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ " " #op " %0, %2 \n" \
+ " stex.w %0, (%3) \n" \
+ " bez %0, 1b \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ smp_mb(); \
+ \
+ return ret; \
+}
+
+#else /* CONFIG_CPU_HAS_LDSTEX */
+
+#include <linux/irqflags.h>
+
+#define __atomic_add_unless __atomic_add_unless
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ unsigned long tmp, ret, flags;
+
+ raw_local_irq_save(flags);
+
+ asm volatile (
+ " ldw %0, (%3) \n"
+ " mov %1, %0 \n"
+ " cmpne %0, %4 \n"
+ " bf 2f \n"
+ " add %0, %2 \n"
+ " stw %0, (%3) \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (a), "r"(&v->counter), "r"(u)
+ : "memory");
+
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%2) \n" \
+ " " #op " %0, %1 \n" \
+ " stw %0, (%2) \n" \
+ : "=&r" (tmp) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%3) \n" \
+ " " #op " %0, %2 \n" \
+ " stw %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp, ret, flags; \
+ \
+ raw_local_irq_save(flags); \
+ \
+ asm volatile ( \
+ " ldw %0, (%3) \n" \
+ " mov %1, %0 \n" \
+ " " #op " %0, %2 \n" \
+ " stw %0, (%3) \n" \
+ : "=&r" (tmp), "=&r" (ret) \
+ : "r" (i), "r"(&v->counter) \
+ : "memory"); \
+ \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_CPU_HAS_LDSTEX */
+
+#define atomic_add_return atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#define atomic_sub_return atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+
+#define atomic_fetch_add atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#define atomic_fetch_sub atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#define atomic_fetch_and atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#define atomic_fetch_or atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#define atomic_fetch_xor atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and atomic_and
+ATOMIC_OP(and, &)
+#define atomic_or atomic_or
+ATOMIC_OP(or, |)
+#define atomic_xor atomic_xor
+ATOMIC_OP(xor, ^)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_CSKY_ATOMIC_H */
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
new file mode 100644
index 000000000..a430e7fdd
--- /dev/null
+++ b/arch/csky/include/asm/barrier.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BARRIER_H
+#define __ASM_CSKY_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() asm volatile ("nop\n":::"memory")
+
+/*
+ * sync: completion barrier, all sync.xx instructions
+ * guarantee the last response received by bus transaction
+ * made by ld/st instructions before sync.s
+ * sync.s: inherit from sync, but also shareable to other cores
+ * sync.i: inherit from sync, but also flush cpu pipeline
+ * sync.is: the same with sync.i + sync.s
+ *
+ * bar.brwarw: ordering barrier for all load/store instructions before it
+ * bar.brwarws: ordering barrier for all load/store instructions before it
+ * and shareable to other cores
+ * bar.brar: ordering barrier for all load instructions before it
+ * bar.brars: ordering barrier for all load instructions before it
+ * and shareable to other cores
+ * bar.bwaw: ordering barrier for all store instructions before it
+ * bar.bwaws: ordering barrier for all store instructions before it
+ * and shareable to other cores
+ */
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define mb() asm volatile ("sync.s\n":::"memory")
+
+#ifdef CONFIG_SMP
+#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
+#define __smp_rmb() asm volatile ("bar.brars\n":::"memory")
+#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory")
+#endif /* CONFIG_SMP */
+
+#define sync_is() asm volatile ("sync.is\n":::"memory")
+
+#else /* !CONFIG_CPU_HAS_CACHEV2 */
+#define mb() asm volatile ("sync\n":::"memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_BARRIER_H */
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
new file mode 100644
index 000000000..43b9838bf
--- /dev/null
+++ b/arch/csky/include/asm/bitops.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BITOPS_H
+#define __ASM_CSKY_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * asm-generic/bitops/ffs.h
+ */
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ "addi %0, 1\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/__ffs.h
+ */
+static __always_inline unsigned long __ffs(unsigned long x)
+{
+ asm volatile (
+ "brev %0\n"
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+ return x;
+}
+
+/*
+ * asm-generic/bitops/fls.h
+ */
+static __always_inline int fls(unsigned int x)
+{
+ asm volatile(
+ "ff1 %0\n"
+ : "=&r"(x)
+ : "0"(x));
+
+ return (32 - x);
+}
+
+/*
+ * asm-generic/bitops/__fls.h
+ */
+static __always_inline unsigned long __fls(unsigned long x)
+{
+ return fls(x) - 1;
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+
+/*
+ * bug fix: why can only the atomic version be used here?
+ */
+#include <asm-generic/bitops/non-atomic.h>
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#endif /* __ASM_CSKY_BITOPS_H */
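
The inline bit-search helpers above follow the usual Linux conventions: ffs() is 1-based and returns 0 when no bit is set, __ffs()/__fls() are 0-based, and fls() returns the position of the most-significant set bit. A few illustrative values (sketch, not part of the patch):

/* Illustration only -- expected results for the helpers defined above. */
static void bitops_examples(void)
{
	BUG_ON(ffs(0) != 0);		/* no bit set: 1-based ffs() returns 0  */
	BUG_ON(ffs(0x8) != 4);		/* lowest set bit is bit 3, 1-based     */
	BUG_ON(__ffs(0x8) != 3);	/* 0-based variant                      */
	BUG_ON(fls(0x8) != 4);		/* highest set bit is bit 3, 1-based    */
	BUG_ON(__fls(0x8) != 3);	/* 0-based variant                      */
}
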
diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h
new file mode 100644
index 000000000..33ebd16b9
--- /dev/null
+++ b/arch/csky/include/asm/bug.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BUG_H
+#define __ASM_CSKY_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define BUG() \
+do { \
+ asm volatile ("bkpt\n"); \
+ unreachable(); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+void die(struct pt_regs *regs, const char *str);
+void show_regs(struct pt_regs *regs);
+void show_code(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_BUG_H */
diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h
new file mode 100644
index 000000000..4b5c09bf1
--- /dev/null
+++ b/arch/csky/include/asm/cache.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CACHE_H
+#define __ASM_CSKY_CACHE_H
+
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#ifndef __ASSEMBLY__
+
+void dcache_wb_line(unsigned long start);
+
+void icache_inv_range(unsigned long start, unsigned long end);
+void icache_inv_all(void);
+void local_icache_inv_all(void *priv);
+
+void dcache_wb_range(unsigned long start, unsigned long end);
+void dcache_wbinv_all(void);
+
+void cache_wbinv_range(unsigned long start, unsigned long end);
+void cache_wbinv_all(void);
+
+void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
+void dma_wb_range(unsigned long start, unsigned long end);
+
+#endif
+#endif /* __ASM_CSKY_CACHE_H */
diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h
new file mode 100644
index 000000000..f0b8f2542
--- /dev/null
+++ b/arch/csky/include/asm/cacheflush.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_CACHEFLUSH_H
+#define __ASM_CSKY_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <abi/cacheflush.h>
+
+#endif /* __ASM_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h
new file mode 100644
index 000000000..768582429
--- /dev/null
+++ b/arch/csky/include/asm/checksum.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_CHECKSUM_H
+#define __ASM_CSKY_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <asm/byteorder.h>
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 tmp;
+
+ asm volatile(
+ "mov %1, %0\n"
+ "rori %0, 16\n"
+ "addu %0, %1\n"
+ "lsri %0, 16\n"
+ : "=r"(csum), "=r"(tmp)
+ : "0"(csum));
+
+ return (__force __sum16) ~csum;
+}
+#define csum_fold csum_fold
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len, unsigned short proto, __wsum sum)
+{
+ asm volatile(
+ "clrc\n"
+ "addc %0, %1\n"
+ "addc %0, %2\n"
+ "addc %0, %3\n"
+ "inct %0\n"
+ : "=r"(sum)
+ : "r"((__force u32)saddr), "r"((__force u32)daddr),
+#ifdef __BIG_ENDIAN
+ "r"(proto + len),
+#else
+ "r"((proto + len) << 8),
+#endif
+ "0" ((__force unsigned long)sum)
+ : "cc");
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CSKY_CHECKSUM_H */
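
csum_fold() above collapses a 32-bit one's-complement accumulator into the final 16-bit checksum by adding the two 16-bit halves (folding any carry back in) and inverting the result. A worked value, assuming the standard fold semantics:

/* Illustration only: folding the accumulator 0x12345678.           */
/* high half + low half = 0x1234 + 0x5678 = 0x68ac (no carry out)   */
/* csum_fold(0x12345678) == (__force __sum16)~0x68ac == 0x9753      */
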
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
new file mode 100644
index 000000000..89224530a
--- /dev/null
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_CMPXCHG_H
+#define __ASM_CSKY_CMPXCHG_H
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+#include <asm/barrier.h>
+
+extern void __bad_xchg(void);
+
+#define __xchg(new, ptr, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ unsigned long tmp; \
+ switch (size) { \
+ case 4: \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ : "=&r" (__ret), "=&r" (tmp) \
+ : "r" (__new), "r"(__ptr) \
+ :); \
+ smp_mb(); \
+ break; \
+ default: \
+ __bad_xchg(); \
+ } \
+ __ret; \
+})
+
+#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr))))
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(new) __tmp; \
+ __typeof__(old) __old = (old); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ smp_mb(); \
+ asm volatile ( \
+ "1: ldex.w %0, (%3) \n" \
+ " cmpne %0, %4 \n" \
+ " bt 2f \n" \
+ " mov %1, %2 \n" \
+ " stex.w %1, (%3) \n" \
+ " bez %1, 1b \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (__tmp) \
+ : "r" (__new), "r"(__ptr), "r"(__old) \
+ :); \
+ smp_mb(); \
+ break; \
+ default: \
+ __bad_xchg(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg(ptr, o, n) \
+ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#else
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#endif /* __ASM_CSKY_CMPXCHG_H */
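
xchg() and cmpxchg() above only support 4-byte operands; any other size falls through to __bad_xchg(). A typical lock-free update loop built on this cmpxchg(), as a sketch (the counter and helper below are hypothetical, not part of the patch):

/* Hypothetical caller -- illustration only. */
static unsigned int shared_counter;

static void shared_counter_add(unsigned int delta)
{
	unsigned int old, new;

	do {
		old = READ_ONCE(shared_counter);
		new = old + delta;
		/* retry if another CPU updated shared_counter in between */
	} while (cmpxchg(&shared_counter, old, new) != old);
}
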
diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h
new file mode 100644
index 000000000..e1ec55827
--- /dev/null
+++ b/arch/csky/include/asm/elf.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_ELF_H
+#define __ASM_CSKY_ELF_H
+
+#include <asm/ptrace.h>
+#include <abi/regdef.h>
+
+#define ELF_ARCH EM_CSKY
+#define EM_CSKY_OLD 39
+
+/* CSKY Relocations */
+#define R_CSKY_NONE 0
+#define R_CSKY_32 1
+#define R_CSKY_PCIMM8BY4 2
+#define R_CSKY_PCIMM11BY2 3
+#define R_CSKY_PCIMM4BY2 4
+#define R_CSKY_PC32 5
+#define R_CSKY_PCRELJSR_IMM11BY2 6
+#define R_CSKY_GNU_VTINHERIT 7
+#define R_CSKY_GNU_VTENTRY 8
+#define R_CSKY_RELATIVE 9
+#define R_CSKY_COPY 10
+#define R_CSKY_GLOB_DAT 11
+#define R_CSKY_JUMP_SLOT 12
+#define R_CSKY_ADDR_HI16 24
+#define R_CSKY_ADDR_LO16 25
+#define R_CSKY_PCRELJSR_IMM26BY2 40
+
+typedef unsigned long elf_greg_t;
+
+typedef struct user_fp elf_fpregset_t;
+
+/*
+ * In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() uses a fixed size of
+ * elf_prstatus. It's 148 for abiv1 and 220 for abiv2; that size is enough
+ * for coredump and there is no need for the full sizeof(struct pt_regs).
+ */
+#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2)
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \
+ ((x)->e_machine == EM_CSKY_OLD))
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+#define ELF_CLASS ELFCLASS32
+#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; }
+
+#ifdef __cskyBE__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE 0x0UL
+#include <abi/elf.h>
+
+/* Similar, but for a thread other than current. */
+struct task_struct;
+extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs);
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation specific
+ * libraries for optimization. This is more specific in intent than poking
+ * at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* __ASM_CSKY_ELF_H */
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
new file mode 100644
index 000000000..81f9477d5
--- /dev/null
+++ b/arch/csky/include/asm/fixmap.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_FIXMAP_H
+#define __ASM_CSKY_FIXMAP_H
+
+#include <asm/page.h>
+#include <asm/memory.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+enum fixed_addresses {
+#ifdef CONFIG_HAVE_TCM
+ FIX_TCM = TCM_NR_PAGES,
+#endif
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+extern void fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base);
+extern void __init fixaddr_init(void);
+
+#endif /* __ASM_CSKY_FIXMAP_H */
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
new file mode 100644
index 000000000..fae72b0b1
--- /dev/null
+++ b/arch/csky/include/asm/ftrace.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_FTRACE_H
+#define __ASM_CSKY_FTRACE_H
+
+#define MCOUNT_INSN_SIZE 14
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+
+#ifndef __ASSEMBLY__
+
+extern void _mcount(unsigned long);
+
+extern void ftrace_graph_call(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
new file mode 100644
index 000000000..14645e3d5
--- /dev/null
+++ b/arch/csky/include/asm/highmem.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_HIGHMEM_H
+#define __ASM_CSKY_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <asm/kmap_types.h>
+#include <asm/cache.h>
+
+/* undef for production */
+#define HIGHMEM_DEBUG 1
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP 1024
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+
+#define flush_cache_kmaps() do {} while (0)
+
+extern void kmap_init(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_CSKY_HIGHMEM_H */
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
new file mode 100644
index 000000000..e909587f2
--- /dev/null
+++ b/arch/csky/include/asm/io.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_IO_H
+#define __ASM_CSKY_IO_H
+
+#include <linux/pgtable.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ *
+ * For CACHEV1 (807, 810), store instructions can fast-retire, so we need
+ * another mb() to prevent the store from fast-retiring.
+ *
+ * For CACHEV2 (860), store instructions with PAGE_ATTR_NO_BUFFERABLE won't
+ * fast-retire.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
+#else
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
+#endif
+
+/*
+ * I/O memory mapping functions.
+ */
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), \
+ (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+
+#include <asm-generic/io.h>
+
+#endif /* __ASM_CSKY_IO_H */
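
The readl()/writel() wrappers above add the ordering described in the comment: a read barrier after MMIO loads and a write barrier before (and, on CACHEV1, after) MMIO stores. A sketch of a driver sequence that relies on those semantics, with purely illustrative register offsets:

/* Hypothetical device registers -- offsets are illustration only. */
#define DEV_DATA	0x00
#define DEV_DOORBELL	0x04

static void dev_kick(void __iomem *base, u32 data)
{
	writel(data, base + DEV_DATA);	/* ordered after prior normal-memory writes by wmb() */
	writel(1, base + DEV_DOORBELL);	/* wmb() keeps the data store ahead of the doorbell  */
}

static u32 dev_read_data(void __iomem *base)
{
	return readl(base + DEV_DATA);	/* rmb() orders later accesses after this load */
}
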
diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
new file mode 100644
index 000000000..33aaf39d6
--- /dev/null
+++ b/arch/csky/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IRQ_WORK_H
+#define __ASM_CSKY_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return true;
+}
+extern void arch_irq_work_raise(void);
+#endif /* __ASM_CSKY_IRQ_WORK_H */
diff --git a/arch/csky/include/asm/irqflags.h b/arch/csky/include/asm/irqflags.h
new file mode 100644
index 000000000..9e3a569a5
--- /dev/null
+++ b/arch/csky/include/asm/irqflags.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_IRQFLAGS_H
+#define __ASM_CSKY_IRQFLAGS_H
+#include <abi/reg_ops.h>
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = mfcr("psr");
+ asm volatile("psrclr ie\n":::"memory");
+ return flags;
+}
+#define arch_local_irq_save arch_local_irq_save
+
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile("psrset ee, ie\n":::"memory");
+}
+#define arch_local_irq_enable arch_local_irq_enable
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile("psrclr ie\n":::"memory");
+}
+#define arch_local_irq_disable arch_local_irq_disable
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return mfcr("psr");
+}
+#define arch_local_save_flags arch_local_save_flags
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ mtcr("psr", flags);
+}
+#define arch_local_irq_restore arch_local_irq_restore
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1<<6));
+}
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
+
+#include <asm-generic/irqflags.h>
+
+#endif /* __ASM_CSKY_IRQFLAGS_H */
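
These primitives back the generic local_irq_save()/local_irq_restore() pair: the IE bit (bit 6) of the PSR is cleared to mask interrupts and the saved PSR is written back afterwards, which is exactly what arch_irqs_disabled_flags() tests. A minimal sketch of the usual pattern:

/* Illustration only -- typical critical section on top of the helpers above. */
static void update_percpu_state(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* PSR saved, IE cleared              */
	/* ... code that must not be interrupted on this CPU ... */
	arch_local_irq_restore(flags);	/* previous PSR (and IE bit) restored */
}
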
diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h
new file mode 100644
index 000000000..b647bbde4
--- /dev/null
+++ b/arch/csky/include/asm/kprobes.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_KPROBES_H
+#define __ASM_CSKY_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_sr;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+int kprobe_breakpoint_handler(struct pt_regs *regs);
+int kprobe_single_step_handler(struct pt_regs *regs);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_CSKY_KPROBES_H */
diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h
new file mode 100644
index 000000000..a65c6759f
--- /dev/null
+++ b/arch/csky/include/asm/memory.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MEMORY_H
+#define __ASM_CSKY_MEMORY_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#define FIXADDR_TOP _AC(0xffffc000, UL)
+#define PKMAP_BASE _AC(0xff800000, UL)
+#define VMALLOC_START _AC(0xc0008000, UL)
+#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
+
+#ifdef CONFIG_HAVE_TCM
+#ifdef CONFIG_HAVE_DTCM
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
+#else
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES)
+#endif
+#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
+#endif
+
+#endif
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
new file mode 100644
index 000000000..26fbb1d15
--- /dev/null
+++ b/arch/csky/include/asm/mmu.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_MMU_H
+#define __ASM_CSKY_MMU_H
+
+typedef struct {
+ atomic64_t asid;
+ void *vdso;
+ cpumask_t icache_stale_mask;
+} mm_context_t;
+
+#endif /* __ASM_CSKY_MMU_H */
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
new file mode 100644
index 000000000..abdf1f1cb
--- /dev/null
+++ b/arch/csky/include/asm/mmu_context.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_MMU_CONTEXT_H
+#define __ASM_CSKY_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <abi/ckmmu.h>
+
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ setup_pgd(__pa(pgd), false)
+
+#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
+ setup_pgd(__pa(pgd), true)
+
+#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1)
+#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
+
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
+#define activate_mm(prev,next) switch_mm(prev, next, current)
+
+#define destroy_context(mm) do {} while (0)
+#define enter_lazy_tlb(mm, tsk) do {} while (0)
+#define deactivate_mm(tsk, mm) do {} while (0)
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (prev != next)
+ check_and_switch_context(next, cpu);
+
+ TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+ write_mmu_entryhi(next->context.asid.counter);
+
+ flush_icache_deferred(next);
+}
+#endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
new file mode 100644
index 000000000..16878240e
--- /dev/null
+++ b/arch/csky/include/asm/page.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PAGE_H
+#define __ASM_CSKY_PAGE_H
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size: 4KB
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define THREAD_SIZE (PAGE_SIZE * 2)
+#define THREAD_MASK (~(THREAD_SIZE - 1))
+#define THREAD_SHIFT (PAGE_SHIFT + 1)
+
+
+/*
+ * For C-SKY "User-space:Kernel-space" is "2GB:2GB" fixed by hardware and there
+ * are two segment registers (MSA0 + MSA1) to mapping 512MB + 512MB physical
+ * address region. We use them mapping kernel 1GB direct-map address area and
+ * for more than 1GB of memory we use highmem.
+ */
+#define PAGE_OFFSET 0x80000000
+#define SSEG_SIZE 0x20000000
+#define LOWMEM_LIMIT (SSEG_SIZE * 2)
+
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
+
+#ifndef __ASSEMBLY__
+
+#include <linux/pfn.h>
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
+ (void *)(kaddr) < high_memory)
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+extern void *memset(void *dest, int c, size_t l);
+extern void *memcpy(void *to, const void *from, size_t l);
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define phys_to_page(paddr) (pfn_to_page(PFN_DOWN(paddr)))
+
+struct page;
+
+#include <abi/page.h>
+
+struct vm_area_struct;
+
+typedef struct { unsigned long pte_low; } pte_t;
+#define pte_val(x) ((x).pte_low)
+
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+extern unsigned long va_pa_offset;
+
+#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET)
+
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset))
+
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+
+#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
+ PHYS_OFFSET_OFFSET)
+#define virt_to_page(x) (mem_map + MAP_NR(x))
+
+#define pfn_to_kaddr(x) __va(PFN_PHYS(x))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_CSKY_PAGE_H */
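
With the 2GB:2GB split described above, the lowmem linear map starts at PAGE_OFFSET and __pa()/__va() are simple offsets. A worked example, assuming a configuration where DRAM starts at physical address 0 so that va_pa_offset and PHYS_OFFSET_OFFSET are both 0 (an assumption, not taken from this patch):

/* Illustration only, assuming va_pa_offset == 0 and PHYS_OFFSET_OFFSET == 0.  */
/* __pa(0x80001000)  == 0x80001000 - PAGE_OFFSET + 0 == 0x00001000             */
/* __va(0x00001000)  == (void *)(0x00001000 + PAGE_OFFSET - 0) == 0x80001000   */
/* virt_to_pfn(0x80001000) == 0x00001000 >> PAGE_SHIFT == 0x1                  */
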
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
new file mode 100644
index 000000000..ebc765b1f
--- /dev/null
+++ b/arch/csky/include/asm/pci.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_PCI_H
+#define __ASM_CSKY_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0
+#define PCIBIOS_MIN_MEM 0
+
+/* C-SKY shim does not initialize PCI bus */
+#define pcibios_assign_all_busses() 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on csky */
+ return -ENODEV;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h
new file mode 100644
index 000000000..572093e11
--- /dev/null
+++ b/arch/csky/include/asm/perf_event.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PERF_EVENT_H
+#define __ASM_CSKY_PERF_EVENT_H
+
+#include <abi/regdef.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
+ asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
+}
+
+#endif /* __ASM_PERF_EVENT_ELF_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
new file mode 100644
index 000000000..d58d8146b
--- /dev/null
+++ b/arch/csky/include/asm/pgalloc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PGALLOC_H
+#define __ASM_CSKY_PGALLOC_H
+
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ set_pmd(pmd, __pmd(__pa(pte)));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ set_pmd(pmd, __pmd(__pa(page_address(pte))));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+extern void pgd_init(unsigned long *p);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *pte;
+ unsigned long i;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+ (pte + i)->pte_low = _PAGE_GLOBAL;
+
+ return pte;
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret;
+ pgd_t *init;
+
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init((unsigned long *)ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* prevent out-of-order execution */
+ smp_mb();
+#ifdef CONFIG_CPU_NEED_TLBSYNC
+ dcache_wb_range((unsigned int)ret,
+ (unsigned int)(ret + PTRS_PER_PGD));
+#endif
+ }
+
+ return ret;
+}
+
+#define __pte_free_tlb(tlb, pte, address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page(tlb, pte); \
+} while (0)
+
+extern void pagetable_init(void);
+extern void pre_mmu_init(void);
+extern void pre_trap_init(void);
+
+#endif /* __ASM_CSKY_PGALLOC_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
new file mode 100644
index 000000000..2002cb7f1
--- /dev/null
+++ b/arch/csky/include/asm/pgtable.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PGTABLE_H
+#define __ASM_CSKY_PGTABLE_H
+
+#include <asm/fixmap.h>
+#include <asm/memory.h>
+#include <asm/addrspace.h>
+#include <abi/pgtable-bits.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+/*
+ * C-SKY uses a two-level paging structure:
+ */
+#define PGD_ORDER 0
+#define PTE_ORDER 0
+
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pte_clear(mm, addr, ptep) set_pte((ptep), \
+ (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot))
+
+#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
+#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
+ _CACHE_MASK)
+
+#define __swp_type(x) (((x).val >> 4) & 0xff)
+#define __swp_offset(x) ((x).val >> 12)
+#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
+ ((offset) << 12) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define __mk_pte(page_nr, pgprot) __pte(((page_nr) << PAGE_SHIFT) | \
+ pgprot_val(pgprot))
+
+/*
+ * C-SKY can't do page protection for execute, and considers it the same as
+ * read. Also, write permissions imply read permissions. This is the closest
+ * we can get by reasonable means.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _CACHE_CACHED)
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_CACHED)
+
+#define _PAGE_IOREMAP \
+ (_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
+ _CACHE_UNCACHED | _PAGE_SO)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern void load_pgd(unsigned long pg_dir);
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+
+static inline void set_pte(pte_t *p, pte_t pte)
+{
+ *p = pte;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent out-of-order execution */
+ smp_mb();
+}
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long ptr;
+
+ ptr = pmd_val(pmd);
+
+ return __va(ptr);
+}
+
+#define pmd_phys(pmd) pmd_val(pmd)
+
+static inline void set_pmd(pmd_t *p, pmd_t pmd)
+{
+ *p = pmd;
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+ /* prevent speculative execution */
+ smp_mb();
+}
+
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == __pa(invalid_pte_table);
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) != __pa(invalid_pte_table));
+}
+
+static inline void pmd_clear(pmd_t *p)
+{
+ pmd_val(*p) = (__pa(invalid_pte_table));
+#if defined(CONFIG_CPU_NEED_TLBSYNC)
+ dcache_wb_line((u32)p);
+#endif
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)
+{
+ return pte.pte_low & _PAGE_READ;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_WRITE;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_MODIFIED;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return (pte).pte_low & _PAGE_ACCESSED;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_MODIFIED)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_MODIFIED;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) |= _PAGE_VALID;
+ return pte;
+}
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+
+/*
+ * Macro to mark a page protection value as "uncacheable". Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+ return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot)));
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#endif /* __ASM_CSKY_PGTABLE_H */
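
The pte helpers above keep the software bits (_PAGE_ACCESSED/_PAGE_MODIFIED) in step with the hardware bits (_PAGE_VALID/_PAGE_DIRTY): a PTE only becomes hardware-dirty once it is both writable and modified, and only hardware-valid once it is readable and accessed. A short sketch of how a fault path would typically use them (illustration only, the function below is hypothetical):

/* Illustration only -- mark a PTE young and dirty after a write fault. */
static pte_t example_fixup_after_write_fault(pte_t pte)
{
	pte = pte_mkyoung(pte);	/* sets _PAGE_ACCESSED, plus _PAGE_VALID if readable */
	pte = pte_mkdirty(pte);	/* sets _PAGE_MODIFIED, plus _PAGE_DIRTY if writable */
	return pte;
}
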
diff --git a/arch/csky/include/asm/probes.h b/arch/csky/include/asm/probes.h
new file mode 100644
index 000000000..5e526334e
--- /dev/null
+++ b/arch/csky/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PROBES_H
+#define __ASM_CSKY_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* __ASM_CSKY_PROBES_H */
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
new file mode 100644
index 000000000..4800f6563
--- /dev/null
+++ b/arch/csky/include/asm/processor.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PROCESSOR_H
+#define __ASM_CSKY_PROCESSOR_H
+
+#include <linux/bitops.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/cache.h>
+#include <abi/reg_ops.h>
+#include <abi/regdef.h>
+#include <abi/switch_context.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
+
+struct cpuinfo_csky {
+ unsigned long asid_cache;
+} __aligned(SMP_CACHE_BYTES);
+
+extern struct cpuinfo_csky cpu_data[];
+
+/*
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing. TASK_SIZE
+ * for a 64-bit kernel is expandable to 8192EB, of which the current CSKY
+ * implementations will "only" be able to use 1TB ...
+ */
+#define TASK_SIZE 0x7fff8000UL
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+struct thread_struct {
+ unsigned long sp; /* kernel stack pointer */
+ unsigned long trap_no; /* saved status register */
+
+ /* FPU regs */
+ struct user_fp __aligned(16) user_fp;
+};
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (unsigned long) &init_stack, \
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * pass the data segment into user programs if it exists;
+ * it can't hurt anything as far as I can tell
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->pc = (_pc); \
+ (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \
+ (_regs)->regs[2] = 0; \
+ (_regs)->regs[3] = 0; /* ABIV2 is R7, use it? */ \
+ (_regs)->sr &= ~PS_S; \
+ (_regs)->usp = (_usp); \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
+
+#define cpu_relax() barrier()
+
+#endif /* __ASM_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
new file mode 100644
index 000000000..91ceb1b45
--- /dev/null
+++ b/arch/csky/include/asm/ptrace.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PTRACE_H
+#define __ASM_CSKY_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+#define PS_S 0x80000000 /* Supervisor Mode */
+
+#define USR_BKPT 0x1464
+
+#define arch_has_single_step() (1)
+#define current_pt_regs() \
+({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
+
+#define user_stack_pointer(regs) ((regs)->usp)
+
+#define user_mode(regs) (!((regs)->sr & PS_S))
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+#define trap_no(regs) ((regs->sr >> 16) & 0xff)
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->pc = val;
+}
+
+#if defined(__CSKYABIV2__)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr)
+#else
+#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9])
+#endif
+
+static inline bool in_syscall(struct pt_regs const *regs)
+{
+ return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
+}
+
+static inline void forget_syscall(struct pt_regs *regs)
+{
+ regs->sr &= ~(0xff << 16);
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->a0 = val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->usp;
+}
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return regs->regs[4];
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->regs[4] = val;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/*
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register at @offset from @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_PTRACE_H */
diff --git a/arch/csky/include/asm/reg_ops.h b/arch/csky/include/asm/reg_ops.h
new file mode 100644
index 000000000..cccf7d525
--- /dev/null
+++ b/arch/csky/include/asm/reg_ops.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_REGS_OPS_H
+#define __ASM_REGS_OPS_H
+
+#define mfcr(reg) \
+({ \
+ unsigned int tmp; \
+ asm volatile( \
+ "mfcr %0, "reg"\n" \
+ : "=r"(tmp) \
+ : \
+ : "memory"); \
+ tmp; \
+})
+
+#define mtcr(reg, val) \
+({ \
+ asm volatile( \
+ "mtcr %0, "reg"\n" \
+ : \
+ : "r"(val) \
+ : "memory"); \
+})
+
+#endif /* __ASM_REGS_OPS_H */
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
new file mode 100644
index 000000000..79ede9b1a
--- /dev/null
+++ b/arch/csky/include/asm/segment.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SEGMENT_H
+#define __ASM_CSKY_SEGMENT_H
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
+
+#define USER_DS ((mm_segment_t) { 0x80000000UL })
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
+
+#endif /* __ASM_CSKY_SEGMENT_H */
diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h
new file mode 100644
index 000000000..efafe4c79
--- /dev/null
+++ b/arch/csky/include/asm/shmparam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SHMPARAM_H
+#define __ASM_CSKY_SHMPARAM_H
+
+#define SHMLBA (4 * PAGE_SIZE)
+
+#define __ARCH_FORCE_SHMLBA
+
+#endif /* __ASM_CSKY_SHMPARAM_H */
diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h
new file mode 100644
index 000000000..668b79ce2
--- /dev/null
+++ b/arch/csky/include/asm/smp.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SMP_H
+#define __ASM_CSKY_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+void __init setup_smp(void);
+
+void __init setup_smp_ipi(void);
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+void arch_send_call_function_single_ipi(int cpu);
+
+void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+int __cpu_disable(void);
+
+void __cpu_die(unsigned int cpu);
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_CSKY_SMP_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000..7cf3f2b34
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <linux/spinlock_types.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+
+/*
+ * Ticket-based spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval;
+ u32 ticket_next = 1 << TICKET_NEXT;
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%2) \n"
+ " mov %1, %0 \n"
+ " add %0, %3 \n"
+ " stex.w %0, (%2) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp), "=&r" (lockval)
+ : "r"(p), "r"(ticket_next)
+ : "cc");
+
+ while (lockval.tickets.next != lockval.tickets.owner)
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+
+ smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 tmp, contended, res;
+ u32 ticket_next = 1 << TICKET_NEXT;
+ u32 *p = &lock->lock;
+
+ do {
+ asm volatile (
+ " ldex.w %0, (%3) \n"
+ " movi %2, 1 \n"
+ " rotli %1, %0, 16 \n"
+ " cmpne %1, %0 \n"
+ " bt 1f \n"
+ " movi %2, 0 \n"
+ " add %0, %0, %4 \n"
+ " stex.w %0, (%3) \n"
+ "1: \n"
+ : "=&r" (res), "=&r" (tmp), "=&r" (contended)
+ : "r"(p), "r"(ticket_next)
+ : "cc");
+ } while (!res);
+
+ if (!contended)
+ smp_mb();
+
+ return !contended;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+
+ return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#else /* CONFIG_QUEUED_RWLOCKS */
+
+/*
+ * Test-and-set spin-locking.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 1b \n"
+ " movi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->lock, 0);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 2f \n"
+ " movi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
+
+/*
+ * read lock/unlock/trylock
+ */
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " blz %0, 1b \n"
+ " addi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ smp_mb();
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " blz %0, 2f \n"
+ " addi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+/*
+ * write lock/unlock/trylock
+ */
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 1b \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+ smp_mb();
+ WRITE_ONCE(lock->lock, 0);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ u32 *p = &lock->lock;
+ u32 tmp;
+
+ asm volatile (
+ "1: ldex.w %0, (%1) \n"
+ " bnez %0, 2f \n"
+ " subi %0, 1 \n"
+ " stex.w %0, (%1) \n"
+ " bez %0, 1b \n"
+ " movi %0, 0 \n"
+ "2: \n"
+ : "=&r" (tmp)
+ : "r"(p)
+ : "cc");
+
+ if (!tmp)
+ smp_mb();
+
+ return !tmp;
+}
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000..88b82438b
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#define TICKET_NEXT 16
+
+typedef struct {
+ union {
+ u32 lock;
+ struct __raw_tickets {
+ /* little endian */
+ u16 owner;
+ u16 next;
+ } tickets;
+ };
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
+
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+
+#else /* !CONFIG_QUEUED_RWLOCKS */
+
+typedef struct {
+ u32 lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* CONFIG_QUEUED_RWLOCKS */
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
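To make the layout concrete: the two ticket halves overlay the single u32
word, and a locker bumps the upper half via TICKET_NEXT. A small
illustration (ticket_demo() is hypothetical, not part of this patch):

static void ticket_demo(void)
{
	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

	lock.lock += 1 << TICKET_NEXT;	/* take a ticket, as arch_spin_lock() does */
	/* now lock.tickets.next == 1, lock.tickets.owner == 0 -> held    */
	/* unlock bumps owner; owner == next again means the lock is free */
}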
diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h
new file mode 100644
index 000000000..d7cd4e51e
--- /dev/null
+++ b/arch/csky/include/asm/stackprotector.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h
new file mode 100644
index 000000000..73142de18
--- /dev/null
+++ b/arch/csky/include/asm/string.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _CSKY_STRING_MM_H_
+#define _CSKY_STRING_MM_H_
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <abi/string.h>
+#endif
+
+#endif /* _CSKY_STRING_MM_H_ */
diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h
new file mode 100644
index 000000000..35a39e889
--- /dev/null
+++ b/arch/csky/include/asm/switch_to.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SWITCH_TO_H
+#define __ASM_CSKY_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ save_to_user_fp(&prev->thread.user_fp);
+ restore_from_user_fp(&next->thread.user_fp);
+}
+#else
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{}
+#endif
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ __switch_to_fpu(__prev, __next); \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_CSKY_SWITCH_TO_H */
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
new file mode 100644
index 000000000..f624fa3bb
--- /dev/null
+++ b/arch/csky/include/asm/syscall.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SYSCALL_H
+#define __ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <abi/regdef.h>
+#include <uapi/linux/audit.h>
+
+extern void *sys_call_table[];
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs_syscallid(regs);
+}
+
+static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
+ int sysno)
+{
+ regs_syscallid(regs) = sysno;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ args++;
+ memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
+}
+
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_CSKY;
+}
+
+#endif /* __ASM_SYSCALL_H */
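For context, a tracer consumes these accessors roughly as below;
trace_entry_demo() is a hypothetical caller, not part of this patch:

static int trace_entry_demo(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	/* args[0] is the preserved orig_a0, args[1..5] come from a1..a5 */
	syscall_get_arguments(task, regs, args);

	return syscall_get_nr(task, regs);	/* i.e. regs_syscallid(regs) */
}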
diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h
new file mode 100644
index 000000000..5d48e5e00
--- /dev/null
+++ b/arch/csky/include/asm/syscalls.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SYSCALLS_H
+#define __ASM_CSKY_SYSCALLS_H
+
+#include <asm-generic/syscalls.h>
+
+long sys_cacheflush(void __user *, unsigned long, int);
+
+long sys_set_thread_area(unsigned long addr);
+
+long sys_csky_fadvise64_64(int fd, int advice, loff_t offset, loff_t len);
+
+#endif /* __ASM_CSKY_SYSCALLS_H */
diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h
new file mode 100644
index 000000000..bd1e662ec
--- /dev/null
+++ b/arch/csky/include/asm/tcm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TCM_H
+#define __ASM_CSKY_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(".tcm.data")
+/* Tag constants with this */
+#define __tcmconst __section(".tcm.rodata")
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __section(".tcm.text") noinline
+/* Tag functions inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(".tcm.text")
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
new file mode 100644
index 000000000..21456a373
--- /dev/null
+++ b/arch/csky/include/asm/thread_info.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_THREAD_INFO_H
+#define _ASM_CSKY_THREAD_INFO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/version.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <abi/switch_context.h>
+
+struct thread_info {
+ struct task_struct *task;
+ void *dump_exec_domain;
+ unsigned long flags;
+ int preempt_count;
+ unsigned long tp_value;
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ struct pt_regs *regs;
+ unsigned int cpu;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .cpu = 0, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8))
+
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(tsk->thread.sp))
+
+#define thread_saved_lr(tsk) \
+ ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15))
+
+static inline struct thread_info *current_thread_info(void)
+{
+ unsigned long sp;
+
+ asm volatile("mov %0, sp\n":"=r"(sp));
+
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_UPROBE 3 /* uprobe breakpoint or singlestep */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 6 /* syscall auditing */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
+#define TIF_SECCOMP 21 /* secure computing */
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
+#endif /* _ASM_CSKY_THREAD_INFO_H */
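current_thread_info() above relies on the kernel stack being
THREAD_SIZE-aligned, so masking any in-stack pointer recovers its base; a
sketch with purely illustrative numbers, assuming an 8 KiB THREAD_SIZE:

static unsigned long thread_info_base(unsigned long sp)
{
	/* e.g. sp = 0xc1234d80, THREAD_SIZE = 0x2000  ->  0xc1234000 */
	return sp & ~(THREAD_SIZE - 1);
}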
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
new file mode 100644
index 000000000..fdff9b8d7
--- /dev/null
+++ b/arch/csky/include/asm/tlb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_TLB_H
+#define __ASM_CSKY_TLB_H
+
+#include <asm/cacheflush.h>
+
+#define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!(tlb)->fullmm) \
+ flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
+ } while (0)
+
+#define tlb_end_vma(tlb, vma) \
+ do { \
+ if (!(tlb)->fullmm) \
+ flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
+ } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_CSKY_TLB_H */
diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h
new file mode 100644
index 000000000..6845b0667
--- /dev/null
+++ b/arch/csky/include/asm/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ */
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+extern void flush_tlb_one(unsigned long vaddr);
+
+#endif
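A quick sketch of the contract spelled out in the comment above;
demo_update_range() is hypothetical and not part of this patch:

static void demo_update_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* ... rewrite the PTEs covering [start, end) ... */

	flush_tlb_range(vma, start, end);	/* drop the now-stale entries */
}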
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
new file mode 100644
index 000000000..1c081805b
--- /dev/null
+++ b/arch/csky/include/asm/traps.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_TRAPS_H
+#define __ASM_CSKY_TRAPS_H
+
+#define VEC_RESET 0
+#define VEC_ALIGN 1
+#define VEC_ACCESS 2
+#define VEC_ZERODIV 3
+#define VEC_ILLEGAL 4
+#define VEC_PRIV 5
+#define VEC_TRACE 6
+#define VEC_BREAKPOINT 7
+#define VEC_UNRECOVER 8
+#define VEC_SOFTRESET 9
+#define VEC_AUTOVEC 10
+#define VEC_FAUTOVEC 11
+#define VEC_HWACCEL 12
+
+#define VEC_TLBMISS 14
+#define VEC_TLBMODIFIED 15
+
+#define VEC_TRAP0 16
+#define VEC_TRAP1 17
+#define VEC_TRAP2 18
+#define VEC_TRAP3 19
+
+#define VEC_TLBINVALIDL 20
+#define VEC_TLBINVALIDS 21
+
+#define VEC_PRFL 29
+#define VEC_FPE 30
+
+extern void *vec_base[];
+
+#define VEC_INIT(i, func) \
+do { \
+ vec_base[i] = (void *)func; \
+} while (0)
+
+void csky_alignment(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_TRAPS_H */
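VEC_INIT() is just a slot assignment into vec_base[]; a hedged example of
wiring the alignment handler (whether trap setup does exactly this is not
shown by this patch):

static void demo_install_vector(void)
{
	VEC_INIT(VEC_ALIGN, csky_alignment);	/* vec_base[VEC_ALIGN] = handler */
}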
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
new file mode 100644
index 000000000..1633ffe5a
--- /dev/null
+++ b/arch/csky/include/asm/uaccess.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_UACCESS_H
+#define __ASM_CSKY_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <asm/segment.h>
+
+static inline int access_ok(const void *addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit.seg;
+
+ return (((unsigned long)addr < limit) &&
+ ((unsigned long)(addr + size) < limit));
+}
+
+#define __addr_ok(addr) (access_ok(addr, 0))
+
+extern int __put_user_bad(void);
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * C-SKY, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+
+#define put_user(x, ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user(x, ptr) \
+ __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+#define __ptr(x) ((unsigned long *)(x))
+
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err = 0; \
+ typeof(*(ptr)) *__pu_addr = (ptr); \
+ typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
+ if (__pu_addr) \
+ __put_user_size(__pu_val, (__pu_addr), (size), \
+ __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ typeof(*(ptr)) *__pu_addr = (ptr); \
+ typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
+ if (access_ok(__pu_addr, size) && __pu_addr) \
+ __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: \
+ __put_user_asm_b(x, ptr, retval); \
+ break; \
+ case 2: \
+ __put_user_asm_h(x, ptr, retval); \
+ break; \
+ case 4: \
+ __put_user_asm_w(x, ptr, retval); \
+ break; \
+ case 8: \
+ __put_user_asm_64(x, ptr, retval); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * Note that PC at a fault is the address *after* the faulting
+ * instruction.
+ */
+#define __put_user_asm_b(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: stb %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_h(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: sth %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_w(x, ptr, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: stw %1, (%2,0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %3 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __put_user_asm_64(x, ptr, err) \
+do { \
+ int tmp; \
+ int errcode; \
+ typeof(*(ptr))src = (typeof(*(ptr)))x; \
+ typeof(*(ptr))*psrc = &src; \
+ \
+ asm volatile( \
+ " ldw %3, (%1, 0) \n" \
+ "1: stw %3, (%2, 0) \n" \
+ " ldw %3, (%1, 4) \n" \
+ "2: stw %3, (%2, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(psrc), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ __get_user_size(x, (ptr), (size), __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ int __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ if (access_ok(__gu_ptr, size) && __gu_ptr) \
+ __get_user_size(x, __gu_ptr, size, __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm_common((x), ptr, "ldb", retval); \
+ break; \
+ case 2: \
+ __get_user_asm_common((x), ptr, "ldh", retval); \
+ break; \
+ case 4: \
+ __get_user_asm_common((x), ptr, "ldw", retval); \
+ break; \
+ default: \
+ x = 0; \
+ (retval) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm_common(x, ptr, ins, err) \
+do { \
+ int errcode; \
+ asm volatile( \
+ "1: " ins " %1, (%4,0) \n" \
+ " br 3f \n" \
+ /* Fix up codes */ \
+ "2: mov %0, %2 \n" \
+ " movi %1, 0 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(errcode) \
+ : "0"(0), "r"(ptr), "2"(-EFAULT) \
+ : "memory"); \
+} while (0)
+
+extern int __get_user_bad(void);
+
+#define ___copy_to_user(to, from, n) \
+do { \
+ int w0, w1, w2, w3; \
+ asm volatile( \
+ "0: cmpnei %1, 0 \n" \
+ " bf 8f \n" \
+ " mov %3, %1 \n" \
+ " or %3, %2 \n" \
+ " andi %3, 3 \n" \
+ " cmpnei %3, 0 \n" \
+ " bf 1f \n" \
+ " br 5f \n" \
+ "1: cmplti %0, 16 \n" /* 4W */ \
+ " bt 3f \n" \
+ " ldw %3, (%2, 0) \n" \
+ " ldw %4, (%2, 4) \n" \
+ " ldw %5, (%2, 8) \n" \
+ " ldw %6, (%2, 12) \n" \
+ "2: stw %3, (%1, 0) \n" \
+ "9: stw %4, (%1, 4) \n" \
+ "10: stw %5, (%1, 8) \n" \
+ "11: stw %6, (%1, 12) \n" \
+ " addi %2, 16 \n" \
+ " addi %1, 16 \n" \
+ " subi %0, 16 \n" \
+ " br 1b \n" \
+ "3: cmplti %0, 4 \n" /* 1W */ \
+ " bt 5f \n" \
+ " ldw %3, (%2, 0) \n" \
+ "4: stw %3, (%1, 0) \n" \
+ " addi %2, 4 \n" \
+ " addi %1, 4 \n" \
+ " subi %0, 4 \n" \
+ " br 3b \n" \
+ "5: cmpnei %0, 0 \n" /* 1B */ \
+ " bf 13f \n" \
+ " ldb %3, (%2, 0) \n" \
+ "6: stb %3, (%1, 0) \n" \
+ " addi %2, 1 \n" \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+ "7: subi %0, 4 \n" \
+ "8: subi %0, 4 \n" \
+ "12: subi %0, 4 \n" \
+ " br 13f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 2b, 13f \n" \
+ ".long 4b, 13f \n" \
+ ".long 6b, 13f \n" \
+ ".long 9b, 12b \n" \
+ ".long 10b, 8b \n" \
+ ".long 11b, 7b \n" \
+ ".previous \n" \
+ "13: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
+ "=r"(w1), "=r"(w2), "=r"(w3) \
+ : "0"(n), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+#define ___copy_from_user(to, from, n) \
+do { \
+ int tmp; \
+ int nsave; \
+ asm volatile( \
+ "0: cmpnei %1, 0 \n" \
+ " bf 7f \n" \
+ " mov %3, %1 \n" \
+ " or %3, %2 \n" \
+ " andi %3, 3 \n" \
+ " cmpnei %3, 0 \n" \
+ " bf 1f \n" \
+ " br 5f \n" \
+ "1: cmplti %0, 16 \n" \
+ " bt 3f \n" \
+ "2: ldw %3, (%2, 0) \n" \
+ "10: ldw %4, (%2, 4) \n" \
+ " stw %3, (%1, 0) \n" \
+ " stw %4, (%1, 4) \n" \
+ "11: ldw %3, (%2, 8) \n" \
+ "12: ldw %4, (%2, 12) \n" \
+ " stw %3, (%1, 8) \n" \
+ " stw %4, (%1, 12) \n" \
+ " addi %2, 16 \n" \
+ " addi %1, 16 \n" \
+ " subi %0, 16 \n" \
+ " br 1b \n" \
+ "3: cmplti %0, 4 \n" \
+ " bt 5f \n" \
+ "4: ldw %3, (%2, 0) \n" \
+ " stw %3, (%1, 0) \n" \
+ " addi %2, 4 \n" \
+ " addi %1, 4 \n" \
+ " subi %0, 4 \n" \
+ " br 3b \n" \
+ "5: cmpnei %0, 0 \n" \
+ " bf 7f \n" \
+ "6: ldb %3, (%2, 0) \n" \
+ " stb %3, (%1, 0) \n" \
+ " addi %2, 1 \n" \
+ " addi %1, 1 \n" \
+ " subi %0, 1 \n" \
+ " br 5b \n" \
+ "8: stw %3, (%1, 0) \n" \
+ " subi %0, 4 \n" \
+ " bf 7f \n" \
+ "9: subi %0, 8 \n" \
+ " bf 7f \n" \
+ "13: stw %3, (%1, 8) \n" \
+ " subi %0, 12 \n" \
+ " bf 7f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 2b, 7f \n" \
+ ".long 4b, 7f \n" \
+ ".long 6b, 7f \n" \
+ ".long 10b, 8b \n" \
+ ".long 11b, 9b \n" \
+ ".long 12b,13b \n" \
+ ".previous \n" \
+ "7: \n" \
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
+ "=r"(tmp) \
+ : "0"(n), "1"(to), "2"(from) \
+ : "memory"); \
+} while (0)
+
+unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
+unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
+
+unsigned long clear_user(void *to, unsigned long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+long strncpy_from_user(char *dst, const char *src, long count);
+long __strncpy_from_user(char *dst, const char *src, long count);
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than n if too long
+ */
+long strnlen_user(const char *src, long n);
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long nextinsn;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_UACCESS_H */
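The single-value accessors follow the standard kernel contract (0 on
success, -EFAULT on a faulting user pointer); a minimal sketch, with
bump_user_flag() hypothetical and not part of this patch:

static long bump_user_flag(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* faults yield -EFAULT */
		return -EFAULT;

	return put_user(val + 1, uptr);	/* 0 on success */
}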
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
new file mode 100644
index 000000000..da7a18295
--- /dev/null
+++ b/arch/csky/include/asm/unistd.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/asm/uprobes.h b/arch/csky/include/asm/uprobes.h
new file mode 100644
index 000000000..600388eb9
--- /dev/null
+++ b/arch/csky/include/asm/uprobes.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_UPROBES_H
+#define __ASM_CSKY_UPROBES_H
+
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES 4
+
+#define UPROBE_SWBP_INSN USR_BKPT
+#define UPROBE_SWBP_INSN_SIZE 2
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+int uprobe_breakpoint_handler(struct pt_regs *regs);
+int uprobe_single_step_handler(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_UPROBES_H */
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
new file mode 100644
index 000000000..d963d691f
--- /dev/null
+++ b/arch/csky/include/asm/vdso.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_VDSO_H
+#define __ASM_CSKY_VDSO_H
+
+#include <abi/vdso.h>
+
+struct csky_vdso {
+ unsigned short rt_signal_retcode[4];
+};
+
+#endif /* __ASM_CSKY_VDSO_H */
diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h
new file mode 100644
index 000000000..43dca6336
--- /dev/null
+++ b/arch/csky/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_CSKY_VMALLOC_H
+#define _ASM_CSKY_VMALLOC_H
+
+#endif /* _ASM_CSKY_VMALLOC_H */
diff --git a/arch/csky/include/uapi/asm/Kbuild b/arch/csky/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..e78470141
--- /dev/null
+++ b/arch/csky/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += ucontext.h
diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..d150cd664
--- /dev/null
+++ b/arch/csky/include/uapi/asm/byteorder.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_BYTEORDER_H
+#define __ASM_CSKY_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* __ASM_CSKY_BYTEORDER_H */
diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000..ed7fad1ea
--- /dev/null
+++ b/arch/csky/include/uapi/asm/cachectl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __ASM_CSKY_CACHECTL_H
+#define __ASM_CSKY_CACHECTL_H
+
+/*
+ * See "man cacheflush"
+ */
+#define ICACHE (1<<0)
+#define DCACHE (1<<1)
+#define BCACHE (ICACHE|DCACHE)
+
+#endif /* __ASM_CSKY_CACHECTL_H */
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000..49d4e147a
--- /dev/null
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_PERF_REGS_H
+#define _ASM_CSKY_PERF_REGS_H
+
+/* Index of struct pt_regs */
+enum perf_event_csky_regs {
+ PERF_REG_CSKY_TLS,
+ PERF_REG_CSKY_LR,
+ PERF_REG_CSKY_PC,
+ PERF_REG_CSKY_SR,
+ PERF_REG_CSKY_SP,
+ PERF_REG_CSKY_ORIG_A0,
+ PERF_REG_CSKY_A0,
+ PERF_REG_CSKY_A1,
+ PERF_REG_CSKY_A2,
+ PERF_REG_CSKY_A3,
+ PERF_REG_CSKY_REGS0,
+ PERF_REG_CSKY_REGS1,
+ PERF_REG_CSKY_REGS2,
+ PERF_REG_CSKY_REGS3,
+ PERF_REG_CSKY_REGS4,
+ PERF_REG_CSKY_REGS5,
+ PERF_REG_CSKY_REGS6,
+ PERF_REG_CSKY_REGS7,
+ PERF_REG_CSKY_REGS8,
+ PERF_REG_CSKY_REGS9,
+#if defined(__CSKYABIV2__)
+ PERF_REG_CSKY_EXREGS0,
+ PERF_REG_CSKY_EXREGS1,
+ PERF_REG_CSKY_EXREGS2,
+ PERF_REG_CSKY_EXREGS3,
+ PERF_REG_CSKY_EXREGS4,
+ PERF_REG_CSKY_EXREGS5,
+ PERF_REG_CSKY_EXREGS6,
+ PERF_REG_CSKY_EXREGS7,
+ PERF_REG_CSKY_EXREGS8,
+ PERF_REG_CSKY_EXREGS9,
+ PERF_REG_CSKY_EXREGS10,
+ PERF_REG_CSKY_EXREGS11,
+ PERF_REG_CSKY_EXREGS12,
+ PERF_REG_CSKY_EXREGS13,
+ PERF_REG_CSKY_EXREGS14,
+ PERF_REG_CSKY_HI,
+ PERF_REG_CSKY_LO,
+ PERF_REG_CSKY_DCSR,
+#endif
+ PERF_REG_CSKY_MAX,
+};
+#endif /* _ASM_CSKY_PERF_REGS_H */
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..66b2268e3
--- /dev/null
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _CSKY_PTRACE_H
+#define _CSKY_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long tls;
+ unsigned long lr;
+ unsigned long pc;
+ unsigned long sr;
+ unsigned long usp;
+
+ /*
+ * a0, a1, a2, a3:
+ * abiv1: r2, r3, r4, r5
+ * abiv2: r0, r1, r2, r3
+ */
+ unsigned long orig_a0;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+
+ /*
+ * ABIV2: r4 ~ r13
+ * ABIV1: r6 ~ r14, r1
+ */
+ unsigned long regs[10];
+
+#if defined(__CSKYABIV2__)
+ /* r16 ~ r30 */
+ unsigned long exregs[15];
+
+ unsigned long rhi;
+ unsigned long rlo;
+ unsigned long dcsr;
+#endif
+};
+
+struct user_fp {
+ unsigned long vr[96];
+ unsigned long fcr;
+ unsigned long fesr;
+ unsigned long fid;
+ unsigned long reserved;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* _CSKY_PTRACE_H */
diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..670c020f2
--- /dev/null
+++ b/arch/csky/include/uapi/asm/sigcontext.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_SIGCONTEXT_H
+#define __ASM_CSKY_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+struct sigcontext {
+ struct pt_regs sc_pt_regs;
+ struct user_fp sc_user_fp;
+};
+
+#endif /* __ASM_CSKY_SIGCONTEXT_H */
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..ba4018929
--- /dev/null
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
+#include <asm-generic/unistd.h>
+
+#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
+__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
+#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)