Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 10
-rw-r--r--  arch/xtensa/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/xtensa/include/asm/asm-uaccess.h | 155
-rw-r--r--  arch/xtensa/include/asm/asmmacro.h | 242
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 263
-rw-r--r--  arch/xtensa/include/asm/barrier.h | 25
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 217
-rw-r--r--  arch/xtensa/include/asm/bootparam.h | 50
-rw-r--r--  arch/xtensa/include/asm/cache.h | 40
-rw-r--r--  arch/xtensa/include/asm/cacheasm.h | 214
-rw-r--r--  arch/xtensa/include/asm/cacheflush.h | 184
-rw-r--r--  arch/xtensa/include/asm/checksum.h | 247
-rw-r--r--  arch/xtensa/include/asm/cmpxchg.h | 223
-rw-r--r--  arch/xtensa/include/asm/coprocessor.h | 154
-rw-r--r--  arch/xtensa/include/asm/core.h | 42
-rw-r--r--  arch/xtensa/include/asm/current.h | 38
-rw-r--r--  arch/xtensa/include/asm/delay.h | 75
-rw-r--r--  arch/xtensa/include/asm/dma.h | 62
-rw-r--r--  arch/xtensa/include/asm/elf.h | 180
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 79
-rw-r--r--  arch/xtensa/include/asm/flat.h | 19
-rw-r--r--  arch/xtensa/include/asm/ftrace.h | 40
-rw-r--r--  arch/xtensa/include/asm/futex.h | 163
-rw-r--r--  arch/xtensa/include/asm/highmem.h | 73
-rw-r--r--  arch/xtensa/include/asm/hw_breakpoint.h | 61
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h | 245
-rw-r--r--  arch/xtensa/include/asm/io.h | 73
-rw-r--r--  arch/xtensa/include/asm/irq.h | 42
-rw-r--r--  arch/xtensa/include/asm/irqflags.h | 83
-rw-r--r--  arch/xtensa/include/asm/jump_label.h | 65
-rw-r--r--  arch/xtensa/include/asm/kasan.h | 39
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h | 104
-rw-r--r--  arch/xtensa/include/asm/linkage.h | 9
-rw-r--r--  arch/xtensa/include/asm/mmu.h | 22
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h | 159
-rw-r--r--  arch/xtensa/include/asm/mxregs.h | 46
-rw-r--r--  arch/xtensa/include/asm/nommu_context.h | 34
-rw-r--r--  arch/xtensa/include/asm/page.h | 207
-rw-r--r--  arch/xtensa/include/asm/pci-bridge.h | 85
-rw-r--r--  arch/xtensa/include/asm/pci.h | 49
-rw-r--r--  arch/xtensa/include/asm/perf_event.h | 4
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 69
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 425
-rw-r--r--  arch/xtensa/include/asm/platform.h | 66
-rw-r--r--  arch/xtensa/include/asm/processor.h | 257
-rw-r--r--  arch/xtensa/include/asm/ptrace.h | 117
-rw-r--r--  arch/xtensa/include/asm/regs.h | 118
-rw-r--r--  arch/xtensa/include/asm/serial.h | 18
-rw-r--r--  arch/xtensa/include/asm/shmparam.h | 21
-rw-r--r--  arch/xtensa/include/asm/signal.h | 23
-rw-r--r--  arch/xtensa/include/asm/smp.h | 43
-rw-r--r--  arch/xtensa/include/asm/spinlock.h | 20
-rw-r--r--  arch/xtensa/include/asm/spinlock_types.h | 12
-rw-r--r--  arch/xtensa/include/asm/stackprotector.h | 40
-rw-r--r--  arch/xtensa/include/asm/stacktrace.h | 44
-rw-r--r--  arch/xtensa/include/asm/string.h | 140
-rw-r--r--  arch/xtensa/include/asm/switch_to.h | 22
-rw-r--r--  arch/xtensa/include/asm/syscall.h | 87
-rw-r--r--  arch/xtensa/include/asm/sysmem.h | 19
-rw-r--r--  arch/xtensa/include/asm/thread_info.h | 139
-rw-r--r--  arch/xtensa/include/asm/timex.h | 60
-rw-r--r--  arch/xtensa/include/asm/tlb.h | 21
-rw-r--r--  arch/xtensa/include/asm/tlbflush.h | 205
-rw-r--r--  arch/xtensa/include/asm/traps.h | 118
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 320
-rw-r--r--  arch/xtensa/include/asm/ucontext.h | 22
-rw-r--r--  arch/xtensa/include/asm/unaligned.h | 29
-rw-r--r--  arch/xtensa/include/asm/unistd.h | 16
-rw-r--r--  arch/xtensa/include/asm/vectors.h | 93
-rw-r--r--  arch/xtensa/include/asm/vermagic.h | 17
-rw-r--r--  arch/xtensa/include/asm/vmalloc.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/xtensa/include/uapi/asm/auxvec.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/byteorder.h | 13
-rw-r--r--  arch/xtensa/include/uapi/asm/ioctls.h | 130
-rw-r--r--  arch/xtensa/include/uapi/asm/ipcbuf.h | 40
-rw-r--r--  arch/xtensa/include/uapi/asm/mman.h | 117
-rw-r--r--  arch/xtensa/include/uapi/asm/msgbuf.h | 50
-rw-r--r--  arch/xtensa/include/uapi/asm/param.h | 31
-rw-r--r--  arch/xtensa/include/uapi/asm/poll.h | 21
-rw-r--r--  arch/xtensa/include/uapi/asm/posix_types.h | 40
-rw-r--r--  arch/xtensa/include/uapi/asm/ptrace.h | 59
-rw-r--r--  arch/xtensa/include/uapi/asm/sembuf.h | 45
-rw-r--r--  arch/xtensa/include/uapi/asm/setup.h | 17
-rw-r--r--  arch/xtensa/include/uapi/asm/shmbuf.h | 51
-rw-r--r--  arch/xtensa/include/uapi/asm/sigcontext.h | 29
-rw-r--r--  arch/xtensa/include/uapi/asm/signal.h | 134
-rw-r--r--  arch/xtensa/include/uapi/asm/sockios.h | 32
-rw-r--r--  arch/xtensa/include/uapi/asm/stat.h | 60
-rw-r--r--  arch/xtensa/include/uapi/asm/swab.h | 71
-rw-r--r--  arch/xtensa/include/uapi/asm/termbits.h | 221
-rw-r--r--  arch/xtensa/include/uapi/asm/types.h | 30
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 26
93 files changed, 7931 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
new file mode 100644
index 000000000..adefb1636
--- /dev/null
+++ b/arch/xtensa/include/asm/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += syscall_table.h
+generic-y += extable.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
+generic-y += seccomp.h
+generic-y += user.h
diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
new file mode 100644
index 000000000..7f6cf4151
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -0,0 +1,155 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access functions to the user memory
+ * space for the kernel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASM_UACCESS_H
+#define _XTENSA_ASM_UACCESS_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+/*
+ * These assembly macros mirror the C macros in asm/uaccess.h. They
+ * should always have identical functionality. See
+ * arch/xtensa/kernel/sys.S for usage.
+ */
+
+#define KERNEL_DS 0
+#define USER_DS 1
+
+/*
+ * get_fs reads current->thread.current_ds into a register.
+ * On Entry:
+ * <ad> anything
+ * <sp> stack
+ * On Exit:
+ * <ad> contains current->thread.current_ds
+ */
+ .macro get_fs ad, sp
+ GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+ addi \ad, \ad, TASK_THREAD
+ l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
+ l32i \ad, \ad, THREAD_CURRENT_DS
+#endif
+ .endm
+
+/*
+ * set_fs sets current->thread.current_ds to some value.
+ * On Entry:
+ * <at> anything (temp register)
+ * <av> value to write
+ * <sp> stack
+ * On Exit:
+ * <at> destroyed (actually, current)
+ * <av> preserved, value to write
+ */
+ .macro set_fs at, av, sp
+ GET_CURRENT(\at,\sp)
+ s32i \av, \at, THREAD_CURRENT_DS
+ .endm
+
+/*
+ * kernel_ok determines whether we should bypass addr/size checking.
+ * See the equivalent C-macro version below for clarity.
+ * On success, kernel_ok branches to a label indicated by parameter
+ * <success>. This implies that the macro falls through to the next
+ * instruction on an error.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on error).
+ *
+ * On Entry:
+ * <at> anything (temp register)
+ * <success> label to branch to on success; implies
+ * fall-through macro on error
+ * <sp> stack pointer
+ * On Exit:
+ * <at> destroyed (actually, current->thread.current_ds)
+ */
+
+#if ((KERNEL_DS != 0) || (USER_DS == 0))
+# error Assembly macro kernel_ok fails
+#endif
+ .macro kernel_ok at, sp, success
+ get_fs \at, \sp
+ beqz \at, \success
+ .endm
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C-macro version below for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed (actually, (TASK_SIZE + 1 - size))
+ */
+ .macro user_ok aa, as, at, error
+ movi \at, __XTENSA_UL_CONST(TASK_SIZE)
+ bgeu \as, \at, \error
+ sub \at, \at, \as
+ bgeu \aa, \at, \error
+ .endm
+
+/*
+ * access_ok determines whether a memory access is allowed. See the
+ * equivalent C-macro version below for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <sp>
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro access_ok aa, as, at, sp, error
+ kernel_ok \at, \sp, .Laccess_ok_\@
+ user_ok \aa, \as, \at, \error
+.Laccess_ok_\@:
+ .endm
+
+#endif /* _XTENSA_ASM_UACCESS_H */
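The user_ok/access_ok macros above encode a simple range check. A minimal C model of that logic, assuming the conventional layout where user addresses occupy [0, TASK_SIZE) and KERNEL_DS == 0 as defined above (the TASK_SIZE value below is illustrative, not a real configuration):

	#include <stdbool.h>

	#define TASK_SIZE	0x40000000UL	/* illustrative user/kernel split */

	/* user_ok: [addr, addr+size) is acceptable iff size < TASK_SIZE
	 * and addr < TASK_SIZE - size (the two bgeu branches). */
	static bool user_ok(unsigned long addr, unsigned long size)
	{
		if (size >= TASK_SIZE)		/* bgeu \as, \at, \error */
			return false;
		return addr < TASK_SIZE - size;	/* bgeu \aa, \at, \error */
	}

	/* access_ok: kernel_ok short-circuits the range check whenever
	 * current->thread.current_ds == KERNEL_DS (i.e. 0). */
	static bool access_ok_model(int current_ds, unsigned long addr,
				    unsigned long size)
	{
		return current_ds == 0 /* KERNEL_DS */ || user_ok(addr, size);
	}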
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
new file mode 100644
index 000000000..bfc89e11f
--- /dev/null
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,242 @@
+/*
+ * include/asm-xtensa/asmmacro.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASMMACRO_H
+#define _XTENSA_ASMMACRO_H
+
+#include <asm/core.h>
+
+/*
+ * Some little helpers for loops. Use zero-overhead-loops
+ * where applicable and if supported by the processor.
+ *
+ * __loopi ar, at, size, incr
+ * ar register initialized with the start address
+ * at scratch register used by macro
+ * size size immediate value
+ * incr increment
+ *
+ * __loops ar, as, at, incr_log2[, mask_log2][, cond][, ncond]
+ * ar register initialized with the start address
+ * as register initialized with the size
+ * at scratch register used by macro
+ * incr_log2 increment [in log2]
+ * mask_log2 mask [in log2]
+ * cond true condition (used in loop'cond')
+ * ncond false condition (used in b'ncond')
+ *
+ * __loop as
+ * restart loop. 'as' register must not have been modified!
+ *
+ * __endla ar, as, incr
+ * ar start address (modified)
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
+ * incr increment
+ */
+
+/*
+ * loop for given size as immediate
+ */
+
+ .macro __loopi ar, at, size, incr
+
+#if XCHAL_HAVE_LOOPS
+ movi \at, ((\size + \incr - 1) / (\incr))
+ loop \at, 99f
+#else
+ addi \at, \ar, \size
+ 98:
+#endif
+
+ .endm
+
+/*
+ * loop for given size in register
+ */
+
+ .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
+
+#if XCHAL_HAVE_LOOPS
+ .ifgt \incr_log2 - 1
+ addi \at, \as, (1 << \incr_log2) - 1
+ .ifnc \mask_log2,
+ extui \at, \at, \incr_log2, \mask_log2
+ .else
+ srli \at, \at, \incr_log2
+ .endif
+ .endif
+ loop\cond \at, 99f
+#else
+ .ifnc \mask_log2,
+ extui \at, \as, \incr_log2, \mask_log2
+ .else
+ .ifnc \ncond,
+ srli \at, \as, \incr_log2
+ .endif
+ .endif
+ .ifnc \ncond,
+ b\ncond \at, 99f
+
+ .endif
+ .ifnc \mask_log2,
+ slli \at, \at, \incr_log2
+ add \at, \ar, \at
+ .else
+ add \at, \ar, \as
+ .endif
+#endif
+ 98:
+
+ .endm
+
+/*
+ * loop from ar to as
+ */
+
+ .macro __loopt ar, as, at, incr_log2
+
+#if XCHAL_HAVE_LOOPS
+ sub \at, \as, \ar
+ .ifgt \incr_log2 - 1
+ addi \at, \at, (1 << \incr_log2) - 1
+ srli \at, \at, \incr_log2
+ .endif
+ loop \at, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * restart loop. registers must be unchanged
+ */
+
+ .macro __loop as
+
+#if XCHAL_HAVE_LOOPS
+ loop \as, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * end of loop with no increment of the address.
+ */
+
+ .macro __endl ar, as
+#if !XCHAL_HAVE_LOOPS
+ bltu \ar, \as, 98b
+#endif
+ 99:
+ .endm
+
+/*
+ * end of loop with increment of the address.
+ */
+
+ .macro __endla ar, as, incr
+ addi \ar, \ar, \incr
+ __endl \ar \as
+ .endm
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
+ * word starting at r from two registers loaded from consecutive aligned
+ * addresses covering r regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
+
+#define XTENSA_STACK_ALIGNMENT 16
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+#define XTENSA_FRAME_SIZE_RESERVE 16
+#define XTENSA_SPILL_STACK_RESERVE 32
+
+#define abi_entry(frame_size) \
+ entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
+ (((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT))
+#define abi_entry_default abi_entry(0)
+
+#define abi_ret(frame_size) retw
+#define abi_ret_default retw
+
+#elif defined(__XTENSA_CALL0_ABI__)
+
+#define XTENSA_SPILL_STACK_RESERVE 0
+
+#define abi_entry(frame_size) __abi_entry (frame_size)
+
+ .macro __abi_entry frame_size
+ .ifgt \frame_size
+ addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT)
+ .endif
+ .endm
+
+#define abi_entry_default
+
+#define abi_ret(frame_size) __abi_ret (frame_size)
+
+ .macro __abi_ret frame_size
+ .ifgt \frame_size
+ addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT)
+ .endif
+ ret
+ .endm
+
+#define abi_ret_default ret
+
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+#define __XTENSA_HANDLER .section ".exception.text", "ax"
+
+#endif /* _XTENSA_ASMMACRO_H */
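Both abi_entry and abi_ret round the requested frame size up to XTENSA_STACK_ALIGNMENT with the (size + align - 1) & -align idiom. A standalone C sketch of that rounding, with a few checks (nothing here is kernel code):

	#include <assert.h>

	#define XTENSA_STACK_ALIGNMENT 16

	/* Round size up to the next multiple of the power-of-two alignment,
	 * exactly as abi_entry/abi_ret compute their sp adjustment. */
	static unsigned long align_frame(unsigned long size)
	{
		return (size + XTENSA_STACK_ALIGNMENT - 1) &
			-XTENSA_STACK_ALIGNMENT;
	}

	int main(void)
	{
		assert(align_frame(0) == 0);
		assert(align_frame(1) == 16);
		assert(align_frame(16) == 16);
		assert(align_frame(17) == 32);
		return 0;
	}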
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
new file mode 100644
index 000000000..744c2f463
--- /dev/null
+++ b/arch/xtensa/include/asm/atomic.h
@@ -0,0 +1,263 @@
+/*
+ * include/asm-xtensa/atomic.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ATOMIC_H
+#define _XTENSA_ATOMIC_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * This Xtensa implementation assumes that the right mechanism
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
+ *
+ * Locking interrupts looks like this:
+ *
+ * rsil a15, TOPLEVEL
+ * <code>
+ * wsr a15, PS
+ * rsync
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) READ_ONCE((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return tmp; \
+}
+
+#elif XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15, "__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a15", "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned int tmp, vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[tmp], %[result], %[i]\n" \
+ " s32i %[tmp], %[mem]\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif /* _XTENSA_ATOMIC_H */
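All three macro families implement the same read-modify-write retry loop; only the hardware primitive differs. As a reference, this is roughly what atomic_add_return() does, modeled in C on top of the cmpxchg() defined in asm/cmpxchg.h (a model for reading the asm, not the generated code):

	static inline int atomic_add_return_model(int i, atomic_t *v)
	{
		int old, new;

		do {
			old = READ_ONCE(v->counter);	/* l32i / l32ex */
			new = old + i;			/* the #op instruction */
		} while (cmpxchg(&v->counter, old, new) != old);
						/* s32c1i / s32ex retry */
		return new;
	}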
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 000000000..d6f8d4ddc
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#include <asm/core.h>
+
+#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#if XCHAL_HAVE_S32C1I
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
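On this core mb() is the memw instruction while rmb() is only a compiler barrier, which is the usual pairing for a flag-protected payload. A generic sketch of that use (not code from this tree):

	static int payload;
	static int ready;

	static void producer(int value)
	{
		payload = value;	/* store the data first */
		wmb();			/* memw: order payload before flag */
		WRITE_ONCE(ready, 1);	/* publish */
	}

	static int consumer(void)
	{
		while (!READ_ONCE(ready))	/* wait for the flag */
			cpu_relax();
		rmb();			/* order flag read before payload read */
		return payload;
	}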
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
new file mode 100644
index 000000000..3f71d364b
--- /dev/null
+++ b/arch/xtensa/include/asm/bitops.h
@@ -0,0 +1,217 @@
+/*
+ * include/asm-xtensa/bitops.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting etc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BITOPS_H
+#define _XTENSA_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/processor.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#if XCHAL_HAVE_NSA
+
+static inline unsigned long __cntlz (unsigned long x)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
+ return lz;
+}
+
+/*
+ * ffz: Find first zero in word. Undefined if no zero exists.
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+static inline int ffz(unsigned long x)
+{
+ return 31 - __cntlz(~x & -~x);
+}
+
+/*
+ * __ffs: Find first bit set in word. Return 0 for bit 0
+ */
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return 31 - __cntlz(x & -x);
+}
+
+/*
+ * ffs: Find first bit set in word. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static inline int ffs(unsigned long x)
+{
+ return 32 - __cntlz(x & -x);
+}
+
+/*
+ * fls: Find last (most-significant) bit set in word.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls (unsigned int x)
+{
+ return 32 - __cntlz(x);
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return 31 - __cntlz(word);
+}
+#else
+
+/* Use the generic implementation if we don't have the nsa/nsau instructions. */
+
+# include <asm-generic/bitops/ffs.h>
+# include <asm-generic/bitops/__ffs.h>
+# include <asm-generic/bitops/ffz.h>
+# include <asm-generic/bitops/fls.h>
+# include <asm-generic/bitops/__fls.h>
+
+#endif
+
+#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_EXCLUSIVE
+
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " "insn" %[tmp], %[tmp], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[value], %[addr]\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+ \
+ return value & mask; \
+}
+
+#elif XCHAL_HAVE_S32C1I
+
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+ \
+ return tmp & mask; \
+}
+
+#else
+
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define BIT_OPS(op, insn, inv) \
+ BIT_OP(op, insn, inv) \
+ TEST_AND_BIT_OP(op, insn, inv)
+
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
+
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* _XTENSA_BITOPS_H */
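With the NSA option, all the find-bit helpers above reduce to one count-leading-zeros instruction plus the x & -x bit-isolation trick. A C model using __builtin_clz as a stand-in for nsau (note one divergence: nsau(0) returns 32, while __builtin_clz(0) is undefined, so the x == 0 cases stay undefined here exactly as the comments warn):

	static inline unsigned int cntlz(unsigned int x)
	{
		return __builtin_clz(x);	/* stand-in for nsau */
	}

	static inline unsigned int model___ffs(unsigned int x)
	{
		return 31 - cntlz(x & -x);	/* lowest set bit: __ffs(8) == 3 */
	}

	static inline int model_ffs(unsigned int x)
	{
		return 32 - cntlz(x & -x);	/* 1-based, like libc: ffs(8) == 4 */
	}

	static inline int model_ffz(unsigned int x)
	{
		return 31 - cntlz(~x & (x + 1)); /* lowest clear bit: ffz(7) == 3 */
	}

	static inline int model_fls(unsigned int x)
	{
		return 32 - cntlz(x);		/* highest set bit: fls(8) == 4 */
	}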
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
new file mode 100644
index 000000000..892aab399
--- /dev/null
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/bootparam.h
+ *
+ * Definition of the Linux/Xtensa boot parameter structure
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * (Concept borrowed from the 68K port)
+ */
+
+#ifndef _XTENSA_BOOTPARAM_H
+#define _XTENSA_BOOTPARAM_H
+
+#define BP_VERSION 0x0001
+
+#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
+#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
+#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
+#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
+
+#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
+#define BP_TAG_LAST 0x7E0B /* last tag */
+
+#ifndef __ASSEMBLY__
+
+/* All records are aligned to 4 bytes */
+
+typedef struct bp_tag {
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure */
+ unsigned long data[0]; /* data */
+} bp_tag_t;
+
+struct bp_meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+};
+
+#define MEMORY_TYPE_CONVENTIONAL 0x1000
+#define MEMORY_TYPE_NONE 0x2000
+
+#endif
+#endif
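The tag records form a linear chain: each record is a 4-byte bp_tag header followed by size bytes of payload, padded to 4-byte alignment. A hedged sketch of a walker (the function and its case bodies are illustrative; the real parser lives elsewhere in arch/xtensa/kernel/):

	static void parse_bootparams(const bp_tag_t *tag)
	{
		if (tag->id != BP_TAG_FIRST)	/* chain opens with version tag */
			return;

		while (tag->id != BP_TAG_LAST) {
			switch (tag->id) {
			case BP_TAG_COMMAND_LINE:
				/* data[] is a 0-terminated string */
				break;
			case BP_TAG_MEMORY:
				/* data[] holds a struct bp_meminfo */
				break;
			}
			/* size excludes the header; records are 4-byte aligned */
			tag = (const bp_tag_t *)((const char *)(tag + 1) +
						 ((tag->size + 3) & ~3u));
		}
	}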
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
new file mode 100644
index 000000000..54e147ac2
--- /dev/null
+++ b/arch/xtensa/include/asm/cache.h
@@ -0,0 +1,40 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <asm/core.h>
+
+#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
+#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
+#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * R/O after init is actually writable; it cannot go to .rodata
+ * according to the vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
+#endif /* _XTENSA_CACHE_H */
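The way size is what decides whether cache aliasing is possible: the cache is indexed by virtual address, so once a way outgrows a page, two mappings of the same physical page can occupy different lines. Concrete numbers, using an assumed configuration (the XCHAL values below are invented for illustration):

	/* Assumed core: 16 KiB data cache, 4 ways, 4 KiB pages. */
	#define XCHAL_DCACHE_SIZE	16384
	#define XCHAL_DCACHE_WAYS	4
	#define DCACHE_WAY_SIZE		(XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
	/* DCACHE_WAY_SIZE == 4096 == PAGE_SIZE: no aliasing, so the
	 * *_page_alias() helpers in cacheflush.h compile to no-ops.
	 * A 32 KiB cache with the same 4 ways would give an 8 KiB way
	 * and enable the alias-handling paths instead. */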
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
new file mode 100644
index 000000000..34545ecfd
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -0,0 +1,214 @@
+/*
+ * include/asm-xtensa/cacheasm.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+#include <asm/cache.h>
+#include <asm/asmmacro.h>
+#include <linux/stringify.h>
+
+/*
+ * Define cache functions as macros here so that they can be used
+ * by the kernel and boot loader. We should consider moving them to a
+ * library that can be linked by both.
+ *
+ * Locking
+ *
+ * ___unlock_dcache_all
+ * ___unlock_icache_all
+ *
+ * Flushing and invalidating
+ *
+ * ___flush_invalidate_dcache_{all|range|page}
+ * ___flush_dcache_{all|range|page}
+ * ___invalidate_dcache_{all|range|page}
+ * ___invalidate_icache_{all|range|page}
+ *
+ */
+
+
+ .macro __loop_cache_unroll ar at insn size line_width max_immed
+
+ .if (1 << (\line_width)) > (\max_immed)
+ .set _reps, 1
+ .elseif (2 << (\line_width)) > (\max_immed)
+ .set _reps, 2
+ .else
+ .set _reps, 4
+ .endif
+
+ __loopi \ar, \at, \size, (_reps << (\line_width))
+ .set _index, 0
+ .rep _reps
+ \insn \ar, _index << (\line_width)
+ .set _index, _index + 1
+ .endr
+ __endla \ar, \at, _reps << (\line_width)
+
+ .endm
+
+
+ .macro __loop_cache_all ar at insn size line_width max_immed
+
+ movi \ar, 0
+ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+
+ .macro __loop_cache_range ar as at insn line_width
+
+ extui \at, \ar, 0, \line_width
+ add \as, \as, \at
+
+ __loops \ar, \as, \at, \line_width
+ \insn \ar, 0
+ __endla \ar, \at, (1 << (\line_width))
+
+ .endm
+
+
+ .macro __loop_cache_page ar at insn line_width max_immed
+
+ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+
+ .macro ___unlock_dcache_all ar at
+
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___unlock_icache_all ar at
+
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_all ar at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_range ar as at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_page ar as
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
new file mode 100644
index 000000000..cf907e5bf
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -0,0 +1,184 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHEFLUSH_H
+#define _XTENSA_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Low-level routines for cache flushing.
+ *
+ * invalidate data or instruction cache:
+ *
+ * __invalidate_icache_all()
+ * __invalidate_icache_page(adr)
+ * __invalidate_dcache_page(adr)
+ * __invalidate_icache_range(from,size)
+ * __invalidate_dcache_range(from,size)
+ *
+ * flush data cache:
+ *
+ * __flush_dcache_page(adr)
+ *
+ * flush and invalidate data cache:
+ *
+ * __flush_invalidate_dcache_all()
+ * __flush_invalidate_dcache_page(adr)
+ * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
+ */
+
+extern void __invalidate_dcache_all(void);
+extern void __invalidate_icache_all(void);
+extern void __invalidate_dcache_page(unsigned long);
+extern void __invalidate_icache_page(unsigned long);
+extern void __invalidate_icache_range(unsigned long, unsigned long);
+extern void __invalidate_dcache_range(unsigned long, unsigned long);
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
+extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
+extern void __flush_invalidate_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
+#else
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
+#endif
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __invalidate_icache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+
+/*
+ * We have physically tagged caches - nothing to do here -
+ * unless we have cache aliasing.
+ *
+ * Pages can get remapped. Because this might change the 'color' of that page,
+ * we have to flush the cache before the PTE is changed.
+ * (see also Documentation/core-api/cachetlb.rst)
+ */
+
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
+
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
+
+#define local_flush_cache_all() \
+ do { \
+ __flush_invalidate_dcache_all(); \
+ __invalidate_icache_all(); \
+ } while (0)
+
+#define flush_cache_mm(mm) flush_cache_all()
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vunmap(start,end) flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page*);
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
+
+#else
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+
+#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+
+#endif
+
+#define flush_icache_user_range flush_icache_range
+
+/* Ensure consistency between data and instruction cache. */
+#define local_flush_icache_range(start, end) \
+ do { \
+ __flush_dcache_range(start, (end) - (start)); \
+ __invalidate_icache_range(start, (end) - (start)); \
+ } while (0)
+
+/* This is not required, see Documentation/core-api/cachetlb.rst */
+#define flush_icache_page(vma,page) do { } while (0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ __flush_dcache_range((unsigned long) dst, len); \
+ __invalidate_icache_range((unsigned long) dst, len); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif
+
+#endif /* _XTENSA_CACHEFLUSH_H */
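The local_flush_icache_range() sequence above is the canonical recipe for making freshly written instructions visible: write the new code back out of the data cache, then invalidate any stale instruction-cache lines covering it. A hedged caller sketch (publish_code and new_insns are invented names for illustration):

	extern const unsigned char new_insns[];	/* hypothetical patch bytes */

	static void publish_code(void *addr, size_t len)
	{
		memcpy(addr, new_insns, len);	/* stores land in the dcache */
		flush_icache_range((unsigned long)addr,	/* dcache writeback, */
				   (unsigned long)addr + len); /* then icache
								* invalidate */
	}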
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
new file mode 100644
index 000000000..44ec1d0b2
--- /dev/null
+++ b/arch/xtensa/include/asm/checksum.h
@@ -0,0 +1,247 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+#include <asm/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+
+#define _HAVE_ARCH_CSUM_AND_COPY
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return csum_partial_copy_generic(src, dst, len);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static __inline__ __sum16 csum_fold(__wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("extui %1, %0, 16, 16\n\t"
+ "extui %0 ,%0, 0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "slli %1, %0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "extui %0, %0, 16, 16\n\t"
+ "neg %0, %0\n\t"
+ "addi %0, %0, -1\n\t"
+ "extui %0, %0, 0, 16\n\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "0" (sum));
+ return (__force __sum16)sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which are always checksummed on 4-octet boundaries.
+ */
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum, tmp, endaddr;
+
+ __asm__ __volatile__(
+ "sub %0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+ "loopgtz %2, 2f\n\t"
+#else
+ "beqz %2, 2f\n\t"
+ "slli %4, %2, 2\n\t"
+ "add %4, %4, %1\n\t"
+ "0:\t"
+#endif
+ "l32i %3, %1, 0\n\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "addi %1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+ "blt %1, %4, 0b\n\t"
+#endif
+ "2:\t"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+
+#ifdef __XTENSA_EL__
+ unsigned long len_proto = (len + proto) << 8;
+#elif defined(__XTENSA_EB__)
+ unsigned long len_proto = len + proto;
+#else
+# error processor byte order undefined!
+#endif
+ __asm__("add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %2\n\t"
+ "bgeu %0, %2, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=r" (len_proto)
+ : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("l32i %1, %2, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %4\n\t"
+ "bgeu %0, %4, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %5\n\t"
+ "bgeu %0, %5, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len)
+{
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic(src, (__force void *)dst, len);
+}
+#endif
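csum_fold() is the standard Internet-checksum finish: fold the 32-bit one's-complement sum to 16 bits with end-around carry, then complement. The inline assembly above is equivalent to this portable C reference (useful when checking the asm by hand):

	static inline unsigned short csum_fold_model(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high into low */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry */
		return (unsigned short)~sum;		/* one's complement */
	}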
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644
index 000000000..a175f8aec
--- /dev/null
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -0,0 +1,223 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %[result], %[addr]\n"
+ " bne %[result], %[cmp], 2f\n"
+ " mov %[tmp], %[new]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ "2:\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %[cmp], scompare1\n"
+ " s32c1i %[new], %[mem]\n"
+ : [new] "+a" (new), [mem] "+m" (*p)
+ : [cmp] "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %[old], %[mem]\n"
+ " bne %[old], %[cmp], 1f\n"
+ " s32i %[new], %[mem]\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : [old] "=&a" (old), [mem] "+m" (*p)
+ : [cmp] "a" (old), [new] "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4: return __cmpxchg_u32(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
+ return old;
+ }
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof (*(ptr))); \
+ })
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed, and a window overflow
+ * must not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %[result], %[addr]\n"
+ " mov %[tmp], %[val]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [val] "a" (val), [addr] "a" (m)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %[tmp], %[mem]\n"
+ " mov %[result], %[val]\n"
+ " wsr %[tmp], scompare1\n"
+ " s32c1i %[result], %[mem]\n"
+ " bne %[result], %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp),
+ [mem] "+m" (*m)
+ : [val] "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %[tmp], %[mem]\n"
+ " s32i %[val], %[mem]\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+ : [val] "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
+}
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+ int bitoff = off * BITS_PER_BYTE;
+#endif
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+ u32 oldv, newv;
+ u32 ret;
+
+ do {
+ oldv = READ_ONCE(*p);
+ ret = (oldv & bitmask) >> bitoff;
+ newv = (oldv & ~bitmask) | (x << bitoff);
+ } while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+ return ret;
+}
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ return xchg_small(ptr, x, 1);
+ case 2:
+ return xchg_small(ptr, x, 2);
+ case 4:
+ return xchg_u32(ptr, x);
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
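xchg_small() builds a byte/halfword exchange out of the 32-bit cmpxchg: it isolates the target lane inside the containing word and retries the word-sized CAS until no other CPU has raced it. A worked little-endian example (values chosen for illustration):

	static void xchg_small_example(void)
	{
		u32 word = 0x11223344;	/* bytes 44 33 22 11 at offsets 0..3 */
		u8 *b = (u8 *)&word + 1;	/* off 1: bitoff 8, mask 0xff00 */
		u32 old = xchg_small(b, 0xAA, 1);	/* returns 0x33 */
		/* word is now 0x1122AA44; the loop re-runs only if the
		 * containing word changed between READ_ONCE and the CAS */
		(void)old;
	}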
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
new file mode 100644
index 000000000..0fbe2a740
--- /dev/null
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -0,0 +1,154 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <variant/tie.h>
+#include <asm/core.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <variant/tie-asm.h>
+
+.macro xchal_sa_start a b
+ .set .Lxchal_pofs_, 0
+ .set .Lxchal_ofs_, 0
+.endm
+
+.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+ .set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_CC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_NOCC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
+ */
+
+#define XTENSA_HAVE_COPROCESSOR(x) \
+ ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS \
+ (XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x) \
+ (XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS \
+ XCHAL_CP_PORT_MASK
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ * ext: extra registers that are used by the compiler
+ * cpn: optional registers that can be used by a user application
+ * cpX: coprocessor registers that can only be used if the corresponding
+ * CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
+ __REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name) __REG0_1 ## abi (name)
+#define __REG0_10(name) __u32 name;
+#define __REG0_11(name) __u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name) __u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a) __u32 n;
+#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+ __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+ __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+ __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+ __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+ __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+ __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+ __attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+ __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
+extern void coprocessor_flush(struct thread_info*, int);
+
+extern void coprocessor_release_all(struct thread_info*);
+extern void coprocessor_flush_all(struct thread_info*);
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _XTENSA_COPROCESSOR_H */
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
new file mode 100644
index 000000000..0fa364964
--- /dev/null
+++ b/arch/xtensa/include/asm/core.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_CORE_H
+#define _ASM_XTENSA_CORE_H
+
+#include <variant/core.h>
+
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#ifndef XCHAL_HAVE_MPU
+#define XCHAL_HAVE_MPU 0
+#endif
+
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#ifndef XCHAL_HW_MIN_VERSION
+#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
+#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
+ XCHAL_HW_MIN_VERSION_MINOR)
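+/* e.g. hardware version RC-2009.0 has major 2300, minor 0 -> 230000 */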
+#else
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+#endif
+
+#endif
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
new file mode 100644
index 000000000..5d98a7ad4
--- /dev/null
+++ b/arch/xtensa/include/asm/current.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-xtensa/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CURRENT_H
+#define _XTENSA_CURRENT_H
+
+#include <asm/thread_info.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#else
+
+#define GET_CURRENT(reg,sp) \
+ GET_THREAD_INFO(reg,sp); \
+ l32i reg, reg, TI_TASK
+
+#endif
+
+
+#endif /* _XTENSA_CURRENT_H */
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
new file mode 100644
index 000000000..24304b39a
--- /dev/null
+++ b/arch/xtensa/include/asm/delay.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-xtensa/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ */
+
+#ifndef _XTENSA_DELAY_H
+#define _XTENSA_DELAY_H
+
+#include <asm/timex.h>
+#include <asm/param.h>
+
+extern unsigned long loops_per_jiffy;
+
+static inline void __delay(unsigned long loops)
+{
+ if (__builtin_constant_p(loops) && loops < 2)
+ __asm__ __volatile__ ("nop");
+ else if (loops >= 2)
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "+r" (loops));
+}
+
+/* Undefined functions to get a compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);
+
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
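+/*
+ * cycles = usecs * ccount_freq / 10^6, computed in fixed point: the
+ * combined shift by 20 divides by 2^20, which approximates 10^6.
+ */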
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long start = get_ccount();
+ unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)get_ccount()) - start < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+ __bad_udelay();
+ else
+ __udelay(usec);
+}
+
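+/*
+ * cycles = nsec * ccount_freq / 10^9: the combined shift by 30 divides
+ * by 2^30, which approximates 10^9.
+ */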
+static inline void __ndelay(unsigned long nsec)
+{
+ /*
+ * Inner shift makes sure multiplication doesn't overflow
+ * for legitimate nsec values
+ */
+ unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+ __delay(cycles);
+}
+
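+/*
+ * Defining ndelay to itself keeps the generic ndelay in
+ * <linux/delay.h> from overriding this implementation.
+ */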
+#define ndelay(n) ndelay(n)
+
+static inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+ __bad_ndelay();
+ else
+ __ndelay(nsec);
+}
+
+#endif
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
new file mode 100644
index 000000000..bb099a373
--- /dev/null
+++ b/arch/xtensa/include/asm/dma.h
@@ -0,0 +1,62 @@
+/*
+ * include/asm-xtensa/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_H
+#define _XTENSA_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+/*
+ * This is only to be defined if we have PC-like DMA.
+ * By default this is not true on an Xtensa processor,
+ * however on boards with a PCI bus, such functionality
+ * might be emulated externally.
+ *
+ * NOTE: there still exists driver code that assumes
+ * this is defined, e.g. drivers/sound/soundcard.c (as of 2.4).
+ */
+#define MAX_DMA_CHANNELS 8
+
+/*
+ * The maximum virtual address to which DMA transfers
+ * can be performed on this platform.
+ *
+ * NOTE: This is board (platform) specific, not processor-specific!
+ *
+ * NOTE: This assumes DMA transfers can only be performed on
+ * the section of physical memory contiguously mapped in virtual
+ * space for the kernel. For the Xtensa architecture, this
+ * means the maximum possible size of this DMA area is
+ * the size of the statically mapped kernel segment
+ * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), i.e. 128 MB.
+ *
+ * NOTE: When the entire KSEG area is DMA capable, we subtract
+ * one from the max address so that the virt_to_phys() macro
+ * works correctly on the address (otherwise the address
+ * enters another area, and virt_to_phys() may not return
+ * the value desired).
+ */
+
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
+#endif
+
+/* Reserve and release a DMA channel */
+extern int request_dma(unsigned int dmanr, const char * device_id);
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+
+#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
new file mode 100644
index 000000000..909a6ab4f
--- /dev/null
+++ b/arch/xtensa/include/asm/elf.h
@@ -0,0 +1,180 @@
+/*
+ * include/asm-xtensa/elf.h
+ *
+ * ELF register definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ELF_H
+#define _XTENSA_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/coprocessor.h>
+#include <linux/elf-em.h>
+
+/* Xtensa processor ELF architecture-magic number */
+
+#define EM_XTENSA_OLD 0xABC7
+
+/* Xtensa relocations defined by the ABIs */
+
+#define R_XTENSA_NONE 0
+#define R_XTENSA_32 1
+#define R_XTENSA_RTLD 2
+#define R_XTENSA_GLOB_DAT 3
+#define R_XTENSA_JMP_SLOT 4
+#define R_XTENSA_RELATIVE 5
+#define R_XTENSA_PLT 6
+#define R_XTENSA_OP0 8
+#define R_XTENSA_OP1 9
+#define R_XTENSA_OP2 10
+#define R_XTENSA_ASM_EXPAND 11
+#define R_XTENSA_ASM_SIMPLIFY 12
+#define R_XTENSA_GNU_VTINHERIT 15
+#define R_XTENSA_GNU_VTENTRY 16
+#define R_XTENSA_DIFF8 17
+#define R_XTENSA_DIFF16 18
+#define R_XTENSA_DIFF32 19
+#define R_XTENSA_SLOT0_OP 20
+#define R_XTENSA_SLOT1_OP 21
+#define R_XTENSA_SLOT2_OP 22
+#define R_XTENSA_SLOT3_OP 23
+#define R_XTENSA_SLOT4_OP 24
+#define R_XTENSA_SLOT5_OP 25
+#define R_XTENSA_SLOT6_OP 26
+#define R_XTENSA_SLOT7_OP 27
+#define R_XTENSA_SLOT8_OP 28
+#define R_XTENSA_SLOT9_OP 29
+#define R_XTENSA_SLOT10_OP 30
+#define R_XTENSA_SLOT11_OP 31
+#define R_XTENSA_SLOT12_OP 32
+#define R_XTENSA_SLOT13_OP 33
+#define R_XTENSA_SLOT14_OP 34
+#define R_XTENSA_SLOT0_ALT 35
+#define R_XTENSA_SLOT1_ALT 36
+#define R_XTENSA_SLOT2_ALT 37
+#define R_XTENSA_SLOT3_ALT 38
+#define R_XTENSA_SLOT4_ALT 39
+#define R_XTENSA_SLOT5_ALT 40
+#define R_XTENSA_SLOT6_ALT 41
+#define R_XTENSA_SLOT7_ALT 42
+#define R_XTENSA_SLOT8_ALT 43
+#define R_XTENSA_SLOT9_ALT 44
+#define R_XTENSA_SLOT10_ALT 45
+#define R_XTENSA_SLOT11_ALT 46
+#define R_XTENSA_SLOT12_ALT 47
+#define R_XTENSA_SLOT13_ALT 48
+#define R_XTENSA_SLOT14_ALT 49
+
+/* ELF register definitions. This is needed for core dump support. */
+
+typedef unsigned long elf_greg_t;
+
+typedef struct user_pt_regs xtensa_gregset_t;
+
+#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 18
+
+typedef unsigned int elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
+ ( (x)->e_machine == EM_XTENSA_OLD ) )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+
+#ifdef __XTENSA_EL__
+# define ELF_DATA ELFDATA2LSB
+#elif defined(__XTENSA_EB__)
+# define ELF_DATA ELFDATA2MSB
+#else
+# error processor byte order undefined!
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+#define CORE_DUMP_USE_REGSET
+
+/*
+ * This is the location at which an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo. We don't provide
+ * such a platform string on Xtensa, hence NULL.
+ */
+
+#define ELF_PLATFORM (NULL)
+
+/*
+ * The Xtensa processor ABI says that when the program starts, a2
+ * contains a pointer to a function which might be registered using
+ * `atexit'. This provides a means for the dynamic linker to call
+ * DT_FINI functions for shared libraries that have been loaded before
+ * the code runs.
+ *
+ * A value of 0 tells us we have no such handler.
+ *
+ * We might as well make sure everything else is cleared too (except
+ * for the stack pointer in a1), just to make things more
+ * deterministic. Also, clearing a0 terminates debugger backtraces.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
+
+typedef struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+#endif
+} elf_xtregs_t;
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
+
+#endif /* _XTENSA_ELF_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644
index 000000000..a06ffb0c6
--- /dev/null
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -0,0 +1,79 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <linux/pgtable.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the start of the consistent memory region upwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN +
+ (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
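+/*
+ * Note that slot addresses grow up from FIXADDR_START here, not down
+ * from FIXADDR_TOP as on many other architectures.
+ */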
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /* Check if this memory layout is broken because fixmap overlaps page
+ * table.
+ */
+ BUILD_BUG_ON(FIXADDR_START <
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
+
+#endif
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
new file mode 100644
index 000000000..ed5870c77
--- /dev/null
+++ b/arch/xtensa/include/asm/flat.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_XTENSA_FLAT_H
+#define __ASM_XTENSA_FLAT_H
+
+#include <asm/unaligned.h>
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
+
+#endif /* __ASM_XTENSA_FLAT_H */
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644
index 000000000..6c6d9a9f1
--- /dev/null
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -0,0 +1,40 @@
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#define ftrace_return_address0 ({ unsigned long a0, a1; \
+ __asm__ __volatile__ ( \
+ "mov %0, a0\n" \
+ "mov %1, a1\n" \
+ : "=r"(a0), "=r"(a1)); \
+ MAKE_PC_FROM_RA(a0, a1); })
+
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define ftrace_return_address(n) return_address(n)
+#endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 3
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
new file mode 100644
index 000000000..a1a27b2ea
--- /dev/null
+++ b/arch/xtensa/include/asm/futex.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atomic futex routines
+ *
+ * Based on the PowerPC implementation
+ *
+ * Copyright (C) 2013 TangoTec Ltd.
+ *
+ * Baruch Siach <baruch@tkos.co.il>
+ */
+
+#ifndef _ASM_XTENSA_FUTEX_H
+#define _ASM_XTENSA_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
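+/*
+ * Both variants loop until the atomic store succeeds.  The __ex_table
+ * entries pair each faulting access (labels 1 and 2) with the fixup
+ * code at label 5, which makes the operation return -EFAULT.
+ */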
+#if XCHAL_HAVE_EXCLUSIVE
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+ __asm__ __volatile( \
+ "1: l32ex %[oldval], %[addr]\n" \
+ insn "\n" \
+ "2: s32ex %[newval], %[addr]\n" \
+ " getex %[newval]\n" \
+ " beqz %[newval], 1b\n" \
+ " movi %[newval], 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ " .literal_position\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
+ " .previous\n" \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret) \
+ : [addr] "r" (uaddr), [oparg] "r" (arg), \
+ [fault] "I" (-EFAULT) \
+ : "memory")
+#elif XCHAL_HAVE_S32C1I
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+ __asm__ __volatile( \
+ "1: l32i %[oldval], %[mem]\n" \
+ insn "\n" \
+ " wsr %[oldval], scompare1\n" \
+ "2: s32c1i %[newval], %[mem]\n" \
+ " bne %[newval], %[oldval], 1b\n" \
+ " movi %[newval], 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ " .literal_position\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
+ " .previous\n" \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+ [mem] "+m" (*(uaddr)) \
+ : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
+ : "memory")
+#endif
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+ int oldval = 0, ret;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %[newval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+#else
+ return -ENOSYS;
+#endif
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp;
+ int ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ " # futex_atomic_cmpxchg_inatomic\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ "1: l32ex %[tmp], %[addr]\n"
+ " s32i %[tmp], %[uval], 0\n"
+ " bne %[tmp], %[oldval], 2f\n"
+ " mov %[tmp], %[newval]\n"
+ "3: s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+#elif XCHAL_HAVE_S32C1I
+ " wsr %[oldval], scompare1\n"
+ "1: s32c1i %[newval], %[addr], 0\n"
+ " s32i %[newval], %[uval], 0\n"
+#endif
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 4\n"
+ " .literal_position\n"
+ "4: movi %[tmp], 2b\n"
+ " movi %[ret], %[fault]\n"
+ " jx %[tmp]\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b, 4b\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ " .long 3b, 4b\n"
+#endif
+ " .previous\n"
+ : [ret] "+r" (ret), [newval] "+r" (newval), [tmp] "=&r" (tmp)
+ : [addr] "r" (uaddr), [oldval] "r" (oldval), [uval] "r" (uval),
+ [fault] "I" (-EFAULT)
+ : "memory");
+
+ return ret;
+#else
+ return -ENOSYS;
+#endif
+}
+
+#endif /* _ASM_XTENSA_FUTEX_H */
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
new file mode 100644
index 000000000..eac503215
--- /dev/null
+++ b/arch/xtensa/include/asm/highmem.h
@@ -0,0 +1,73 @@
+/*
+ * include/asm-xtensa/highmem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_HIGHMEM_H
+#define _XTENSA_HIGHMEM_H
+
+#include <linux/wait.h>
+#include <linux/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+
+#define PKMAP_BASE ((FIXADDR_START - \
+ (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
+#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL_EXEC
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+#define get_pkmap_color get_pkmap_color
+static inline int get_pkmap_color(struct page *page)
+{
+ return DCACHE_ALIAS(page_to_phys(page));
+}
+
+extern unsigned int last_pkmap_nr_arr[];
+
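+/*
+ * Each cache color has its own allocation cursor; advancing it by
+ * DCACHE_N_COLORS and adding the color back keeps every returned slot
+ * at the requested color.
+ */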
+static inline unsigned int get_next_pkmap_nr(unsigned int color)
+{
+ last_pkmap_nr_arr[color] =
+ (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
+ return last_pkmap_nr_arr[color] + color;
+}
+
+static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
+{
+ return pkmap_nr < DCACHE_N_COLORS;
+}
+
+static inline int get_pkmap_entries_count(unsigned int color)
+{
+ return LAST_PKMAP / DCACHE_N_COLORS;
+}
+
+extern wait_queue_head_t pkmap_map_wait_arr[];
+
+static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+{
+ return pkmap_map_wait_arr + color;
+}
+#endif
+
+extern pte_t *pkmap_page_table;
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+void kmap_init(void);
+
+#endif
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..9f119c1ca
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -0,0 +1,61 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef __ASM_XTENSA_HW_BREAKPOINT_H
+#define __ASM_XTENSA_HW_BREAKPOINT_H
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <uapi/linux/hw_breakpoint.h>
+
+/* Breakpoint */
+#define XTENSA_BREAKPOINT_EXECUTE 0
+
+/* Watchpoints */
+#define XTENSA_BREAKPOINT_LOAD 1
+#define XTENSA_BREAKPOINT_STORE 2
+
+struct arch_hw_breakpoint {
+ unsigned long address;
+ u16 len;
+ u16 type;
+};
+
+struct perf_event_attr;
+struct perf_event;
+struct pt_regs;
+struct task_struct;
+
+int hw_breakpoint_slots(int type);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int check_hw_breakpoint(struct pt_regs *regs);
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+#else
+
+struct task_struct;
+
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __ASM_XTENSA_HW_BREAKPOINT_H */
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000..05cb13dfe
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,245 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMUs.
+ *
+ * For the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <asm/vectors.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
+ * For details see Documentation/xtensa/atomctl.rst
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+ movi a3, 0x29 /* non-MX -- most cores use standard memory
+ * controllers which usually can't use RCW
+ */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
+
+ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+ * and jump to the new mapping.
+ */
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -XCHAL_SPANNING_WAY
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the requested static mappings. */
+
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using final mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
+ .endm
+
+ .macro initialize_cacheattr
+
+#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
+#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
+#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
+#endif
+
+#if XCHAL_HAVE_MPU
+ __REFCONST
+ .align 4
+.Lattribute_table:
+ .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
+ .long 0x006600, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .previous
+
+ movi a3, .Lattribute_table
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a5, 1
+ movi a6, XCHAL_MPU_ENTRIES
+ movi a10, 0x20000000
+ movi a11, -1
+1:
+ sub a5, a5, a10
+ extui a8, a4, 28, 4
+ beq a8, a11, 2f
+ addi a6, a6, -1
+ mov a11, a8
+2:
+ addx4 a9, a8, a3
+ l32i a9, a9, 0
+ or a9, a9, a6
+ wptlb a9, a5
+ slli a4, a4, 4
+ bgeu a5, a10, 1b
+
+#else
+ movi a5, XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a8, 0x20000000
+1:
+ rdtlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ wdtlb a3, a5
+ ritlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ witlb a3, a5
+
+ add a5, a5, a8
+ srli a4, a4, 4
+ bgeu a5, a8, 1b
+
+ isync
+#endif
+#endif
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
new file mode 100644
index 000000000..54188e69b
--- /dev/null
+++ b/arch/xtensa/include/asm/io.h
@@ -0,0 +1,73 @@
+/*
+ * include/asm-xtensa/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IO_H
+#define _XTENSA_IO_H
+
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/vectors.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+
+#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
+#define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
+
+#ifdef CONFIG_MMU
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
+/*
+ * Return the virtual address for the specified bus memory.
+ * Addresses inside the statically mapped KIO window are converted
+ * directly; anything else needs a real remap.
+ */
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
+ else
+ return xtensa_ioremap_nocache(offset, size);
+}
+
+static inline void __iomem *ioremap_cache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void *)(offset - XCHAL_KIO_PADDR + XCHAL_KIO_CACHED_VADDR);
+ else
+ return xtensa_ioremap_cache(offset, size);
+}
+#define ioremap_cache ioremap_cache
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+ unsigned long va = (unsigned long) addr;
+
+ if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+ va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+ !(va >= XCHAL_KIO_BYPASS_VADDR &&
+ va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+ xtensa_iounmap(addr);
+}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+#endif /* CONFIG_MMU */
+
+#include <asm-generic/io.h>
+
+#endif /* _XTENSA_IO_H */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
new file mode 100644
index 000000000..0f71a51da
--- /dev/null
+++ b/arch/xtensa/include/asm/irq.h
@@ -0,0 +1,42 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <linux/init.h>
+#include <asm/core.h>
+
+#ifdef CONFIG_PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
+#else
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
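+/* Linux IRQ 0 is kept invalid, so hardware IRQs are shifted up by one. */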
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return (irq);
+}
+
+struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);
+
+#endif /* _XTENSA_IRQ_H */
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
new file mode 100644
index 000000000..128906815
--- /dev/null
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -0,0 +1,83 @@
+/*
+ * Xtensa IRQ flags handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_IRQFLAGS_H
+#define _XTENSA_IRQFLAGS_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("rsr %0, ps" : "=a" (flags));
+ return flags;
+}
+
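+/*
+ * rsil raises PS.INTLEVEL to LOCKLEVEL and returns the old PS in one
+ * instruction.  The fake-NMI variants are careful never to lower an
+ * INTLEVEL that is already above LOCKLEVEL (e.g. the debug level).
+ */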
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_MISC) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+ : "=a" (flags) :: "memory");
+#endif
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+ asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("wsr %0, ps; rsync"
+ :: "a" (flags) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _XTENSA_IRQFLAGS_H */
diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
new file mode 100644
index 000000000..c812bf850
--- /dev/null
+++ b/arch/xtensa/include/asm/jump_label.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_JUMP_LABEL_H
+#define _ASM_XTENSA_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 3
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "_nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ /*
+ * Xtensa assembler will mark certain points in the code
+ * as unreachable, so that later assembler or linker relaxation
+ * passes could use them. A spot right after the J instruction
+ * is one such point. Assembler and/or linker may insert padding
+ * or literals here, breaking code flow in case the J instruction
+ * is later replaced with NOP. Put a label right after the J to
+ * make it reachable and wrap both into a no-transform block
+ * to avoid any assembler interference with this.
+ */
+ asm_volatile_goto("1:\n\t"
+ ".begin no-transform\n\t"
+ "_j %l[l_yes]\n\t"
+ "2:\n\t"
+ ".end no-transform\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000..216b6f32c
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000..6fc05cba6
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,104 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/core.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#endif
+
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
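+/* 8 KiB stacks normally; KASAN instrumentation needs the deeper 32 KiB. */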
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
+#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000..0ba997323
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
new file mode 100644
index 000000000..71afe418d
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_H
+#define _XTENSA_MMU_H
+
+#ifndef CONFIG_MMU
+#include <asm-generic/mmu.h>
+#else
+
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ unsigned int cpu;
+} mm_context_t;
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
new file mode 100644
index 000000000..74923ef3b
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -0,0 +1,159 @@
+/*
+ * Switch an MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_CONTEXT_H
+#define _XTENSA_MMU_CONTEXT_H
+
+#ifndef CONFIG_MMU
+#include <asm/nommu_context.h>
+#else
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+
+#include <asm/vectors.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
+
+#if (XCHAL_HAVE_TLBS != 1)
+# error "Linux must have an MMU!"
+#endif
+
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * NO_CONTEXT is the invalid ASID value that we don't ever assign to
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
+ *
+ * 0 invalid
+ * 1 kernel
+ * 2 reserved
+ * 3 reserved
+ * 4...255 available
+ */
+
+#define NO_CONTEXT 0
+#define ASID_USER_FIRST 4
+#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
+#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
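+
+/*
+ * RASID holds one ASID per ring: ring 0 is the kernel (1), ring 1 the
+ * current user context, and rings 2/3 keep the reserved values 2 and 3.
+ */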
+
+void init_mmu(void);
+void init_kio(void);
+
+static inline void set_rasid_register (unsigned long val)
+{
+ __asm__ __volatile__ (" wsr %0, rasid\n\t"
+ " isync\n" : : "a" (val));
+}
+
+static inline unsigned long get_rasid_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long asid = cpu_asid_cache(cpu);
+ if ((++asid & ASID_MASK) == 0) {
+ /*
+ * Start new asid cycle; continue counting with next
+ * incarnation bits; skipping over 0, 1, 2, 3.
+ */
+ local_flush_tlb_all();
+ asid += ASID_USER_FIRST;
+ }
+ cpu_asid_cache(cpu) = asid;
+ mm->context.asid[cpu] = asid;
+ mm->context.cpu = cpu;
+}
+
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ /*
+ * Check if our ASID is of an older version and thus invalid.
+ * The bits above ASID_MASK count ASID generations, so a mismatch
+ * there means the ASID is stale.
+ */
+
+ if (mm) {
+ unsigned long asid = mm->context.asid[cpu];
+
+ if (asid == NO_CONTEXT ||
+ ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+ get_new_mmu_context(mm, cpu);
+ }
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+ get_mmu_context(mm, cpu);
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ invalidate_page_directory();
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
+ */
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ }
+ mm->context.cpu = -1;
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ int migrated = next->context.cpu != cpu;
+ /* Flush the icache if we migrated to a new core. */
+ if (migrated) {
+ __invalidate_icache_all();
+ next->context.cpu = cpu;
+ }
+ if (migrated || prev != next)
+ activate_context(next, cpu);
+}
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ invalidate_page_directory();
+}
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ /* Nothing to do. */
+
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_CONTEXT_H */
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h
new file mode 100644
index 000000000..73dcc5456
--- /dev/null
+++ b/arch/xtensa/include/asm/mxregs.h
@@ -0,0 +1,46 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MXREGS_H
+#define _XTENSA_MXREGS_H
+
+/*
+ * RER/WER at, as Read/write external register
+ * at: value
+ * as: address
+ *
+ * Address Value
+ * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
+ * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
+ * 0180 0...0m..m Clear enable specified by mask (m)
+ * 0184 0...0m..m Set enable specified by mask (m)
+ * 0190 0...0x..x 8-bit IPI partition register
+ * VVVVVVVVVVPPPPUUUUUUUUUUUUUUUUUU
+ * V (10-bit) Release/Version
+ * P ( 4-bit) Number of cores - 1
+ * U (18-bit) ID
+ * 01a0 i.......i 32-bit ConfigID
+ * 0200 0...0m..m RunStall core 'n'
+ * 0220 c Cache coherency enabled
+ */
+
+#define MIROUT(irq) (0x000 + (irq))
+#define MIPICAUSE(cpu) (0x100 + (cpu))
+#define MIPISET(cause) (0x140 + (cause))
+#define MIENG 0x180
+#define MIENGSET 0x184
+#define MIASG 0x188 /* Read Global Assert Register */
+#define MIASGSET 0x18c /* Set Global Assert Register */
+#define MIPIPART 0x190
+#define SYSCFGID 0x1a0
+#define MPSCORE 0x200
+#define CCON 0x220
+
+#endif /* _XTENSA_MXREGS_H */
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
new file mode 100644
index 000000000..37251b2ef
--- /dev/null
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+static inline void init_mmu(void)
+{
+}
+
+static inline void init_kio(void)
+{
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
+}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
new file mode 100644
index 000000000..37ce25ef9
--- /dev/null
+++ b/arch/xtensa/include/asm/page.h
@@ -0,0 +1,207 @@
+/*
+ * include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/kmem_layout.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
+#else
+#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
+#endif
+
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * | |cache| cache index
+ * | pfn |off| virtual address
+ * |xxxx:X|zzz|
+ * | : | |
+ * | \ / | |
+ * |trans.| |
+ * | / \ | |
+ * |yyyy:Y|zzz| physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER 0
+# define DCACHE_ALIAS(a) ((void)(a), 0)
+#endif
+#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER 0
+#endif
+
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ * Use 'nsau' instructions if supported by the processor or the generic version.
+ */
+
+#if XCHAL_HAVE_NSA
+
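+/*
+ * nsau returns the normalization shift amount, i.e. the number of
+ * leading zero bits, so 32 - nsau((size - 1) >> PAGE_SHIFT) is the
+ * order.
+ */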
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
+ return 32 - lz;
+}
+
+#else
+
+# include <asm-generic/getorder.h>
+
+#endif
+
+struct page;
+struct vm_area_struct;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_page_alias(void *vaddr, unsigned long paddr);
+extern void copy_page_alias(void *to, void *from,
+ unsigned long to_paddr, unsigned long from_paddr);
+
+#define clear_user_highpage clear_user_highpage
+void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#else
+# define clear_user_page(page, vaddr, pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel address, not user
+ * addresses.
+ */
+
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+#ifdef CONFIG_MMU
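+/*
+ * Fold the bypass alias of KSEG (and, for XIP kernels, of KIO) onto the
+ * cached address before converting it to a physical address.
+ */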
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+#ifndef CONFIG_XIP_KERNEL
+ return off + PHYS_OFFSET;
+#else
+ if (off < XCHAL_KSEG_SIZE)
+ return off + PHYS_OFFSET;
+
+ off -= XCHAL_KSEG_SIZE;
+ if (off >= XCHAL_KIO_SIZE)
+ off -= XCHAL_KIO_SIZE;
+
+ return off + XCHAL_KIO_PADDR;
+#endif
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+#define pfn_valid(pfn) \
+ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#ifdef CONFIG_DISCONTIGMEM
+# error CONFIG_DISCONTIGMEM not supported
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
new file mode 100644
index 000000000..405526912
--- /dev/null
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-xtensa/pci-bridge.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_BRIDGE_H
+#define _XTENSA_PCI_BRIDGE_H
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pciauto_bus_scan() enumerates the pci space.
+ */
+
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+struct pci_space {
+ unsigned long start;
+ unsigned long end;
+ unsigned long base;
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+
+struct pci_controller {
+ int index; /* used for pci_controller_num */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct pci_space io_space;
+ struct pci_space mem_space;
+
+ /* Return the interrupt number for a device. */
+ int (*map_irq)(struct pci_dev*, u8, u8);
+
+};
+
+static inline void pcibios_init_resource(struct resource *res,
+ unsigned long start, unsigned long end, int flags, char *name)
+{
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+ res->name = name;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+}
+
+
+/* These are used for config access before all the PCI probing has been done. */
+int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
+int early_read_config_word(struct pci_controller*, int, int, int, u16*);
+int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
+int early_write_config_byte(struct pci_controller*, int, int, int, u8);
+int early_write_config_word(struct pci_controller*, int, int, int, u16);
+int early_write_config_dword(struct pci_controller*, int, int, int, u32);
+
+#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
new file mode 100644
index 000000000..8e2b48a26
--- /dev/null
+++ b/arch/xtensa/include/asm/pci.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-xtensa/pci.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_H
+#define _XTENSA_PCI_H
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader
+ */
+
+#define pcibios_assign_all_busses() 0
+
+/* Assume some values. (We should revise them, if necessary) */
+
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Dynamic DMA mapping stuff.
+ * Xtensa has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* The PCI address space is identical to the physical memory address space.
+ * The networking and block device layers use this fact when making bounce
+ * buffer decisions.
+ */
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h
new file mode 100644
index 000000000..5aa4590ac
--- /dev/null
+++ b/arch/xtensa/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_XTENSA_PERF_EVENT_H
+#define __ASM_XTENSA_PERF_EVENT_H
+
+#endif /* __ASM_XTENSA_PERF_EVENT_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
new file mode 100644
index 000000000..d3a22da4d
--- /dev/null
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-xtensa/pgalloc.h
+ *
+ * Copyright (C) 2001-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef CONFIG_MMU
+#include <linux/highmem.h>
+#include <linux/slab.h>
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
+/*
+ * Allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pmd_populate_kernel(mm, pmdp, ptep) \
+ (pmd_val(*(pmdp)) = ((unsigned long)ptep))
+#define pmd_populate(mm, pmdp, page) \
+ (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+}
+
+static inline void ptes_clear(pte_t *ptep)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ pte_clear(NULL, 0, ptep + i);
+}
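+/*
+ * An empty PTE is not all-zero on xtensa (pte_clear() in pgtable.h
+ * writes _PAGE_CA_INVALID | _PAGE_USER), so pages zeroed by the
+ * generic allocator are not enough: every new page table is run
+ * through ptes_clear() first.
+ */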
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *ptep;
+
+ ptep = (pte_t *)__pte_alloc_one_kernel(mm);
+ if (!ptep)
+ return NULL;
+ ptes_clear(ptep);
+ return ptep;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ struct page *page;
+
+ page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+ if (!page)
+ return NULL;
+ ptes_clear(page_address(page));
+ return page;
+}
+
+#endif /* CONFIG_MMU */
+
+#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
new file mode 100644
index 000000000..4dc04e6c0
--- /dev/null
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-xtensa/pgtable.h
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGTABLE_H
+#define _XTENSA_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * We only use two ring levels, user and kernel space.
+ */
+
+#ifdef CONFIG_MMU
+#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
+#define KERNEL_RING 0 /* kernel ring level */
+
+/*
+ * The Xtensa architecture port of Linux has a two-level page table system,
+ * i.e. the logical three-level Linux page table layout is folded.
+ * Each task has the following memory page tables:
+ *
+ * PGD table (page directory), i.e. 3rd-level page table:
+ * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
+ * (Architectures that don't have the PMD folded point to the PMD tables)
+ *
+ * The pointer to the PGD table for a given task can be retrieved from
+ * the task structure (struct task_struct*) t, e.g. current():
+ * (t->mm ? t->mm : t->active_mm)->pgd
+ *
+ * PMD tables (page middle-directory), i.e. 2nd-level page tables:
+ * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
+ *
+ * PTE tables (page table entry), i.e. 1st-level page tables:
+ * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
+ * invalid_pte_table for absent mappings.
+ *
+ * The individual pages are 4 kB in size, with special pages such as the
+ * empty_zero_page.
+ */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE_SHIFT 10
+#define PTRS_PER_PGD 1024
+#define PGD_ORDER 0
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
+
+#ifdef CONFIG_MMU
+/*
+ * Virtual memory area. We keep a distance to other memory regions to be
+ * on the safe side. We also use this area for cache aliasing.
+ */
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_START + 0x08000000)
+#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE ICACHE_WAY_SIZE
+#endif
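+/*
+ * TLBTEMP_SIZE is max(2 * DCACHE_WAY_SIZE, ICACHE_WAY_SIZE); the
+ * intent, presumably, is that the temporary-mapping window starting
+ * at TLBTEMP_BASE_1 must fit two data-cache colours (TLBTEMP_BASE_1
+ * and TLBTEMP_BASE_2 are one DCACHE_WAY_SIZE apart) or a whole
+ * instruction-cache way, whichever is larger.
+ */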
+
+#else
+
+#define VMALLOC_START __XTENSA_UL_CONST(0)
+#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
+
+#endif
+
+/*
+ * For the Xtensa architecture, the PTE layout is as follows:
+ *
+ * 31------12 11 10-9 8-6 5-4 3-2 1-0
+ * +-----------------------------------------+
+ * | | Software | HARDWARE |
+ * | PPN | ADW | RI |Attribute|
+ * +-----------------------------------------+
+ * pte_none | MBZ | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | wx |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ *
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | w1 |
+ * +-----------------------------------------+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
+ * +-----------------------------------------+
+ *
+ * Legend:
+ * PPN Physical Page Number
+ * ADW software: accessed (young) / dirty / writable
+ * RI ring (0=privileged, 1=user, 2 and 3 are unused)
+ * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
+ * (11 is invalid and used to mark pages that are not present)
+ * w page is writable (hw)
+ * x page is executable (hw)
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+ * any access (read, write, and execute).
+ * - 'multihit-exception' has the highest priority of all MMU exceptions,
+ * so the ring must be set to 'RING_USER' even for 'non-present' pages.
+ * - on older hardware, the executable flag was not supported and was
+ *   used as a 'valid' flag, so it always needs to be set.
+ * - we need to keep track of certain flags in software (dirty and young);
+ *   to do this, we use write exceptions and a separate software w-flag.
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
+ */
+
+#define _PAGE_ATTRIB_MASK 0xf
+
+#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
+#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
+
+#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
+#define _PAGE_CA_WB (1<<2) /* write-back */
+#define _PAGE_CA_WT (2<<2) /* write-through */
+#define _PAGE_CA_MASK (3<<2)
+#define _PAGE_CA_INVALID (3<<2)
+
+/* We use invalid attribute values to distinguish special pte entries */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
+#define _PAGE_NONE 0x04
+#else
+#define _PAGE_HW_VALID 0x00
+#define _PAGE_NONE 0x0f
+#endif
+
+#define _PAGE_USER (1<<4) /* user access (ring=1) */
+
+/* Software */
+#define _PAGE_WRITABLE_BIT 6
+#define _PAGE_WRITABLE (1<<6) /* software: page writable */
+#define _PAGE_DIRTY (1<<7) /* software: page dirty */
+#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
+#else
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+#endif
+
+#else /* no mmu */
+
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+# define PAGE_NONE __pgprot(0)
+# define PAGE_SHARED __pgprot(0)
+# define PAGE_COPY __pgprot(0)
+# define PAGE_READONLY __pgprot(0)
+# define PAGE_KERNEL __pgprot(0)
+
+#endif
+
+/*
+ * On certain configurations of the Xtensa MMU (e.g. the initial Linux config),
+ * the MMU can't do page protection for execute and considers that the same as
+ * read. Also, write permissions may imply read permissions.
+ * What follows is the closest we can get by reasonable means.
+ * See linux/mm/mmap.c for the protection_map[] array that uses these definitions.
+ */
+#define __P000 PAGE_NONE /* private --- */
+#define __P001 PAGE_READONLY /* private --r */
+#define __P010 PAGE_COPY /* private -w- */
+#define __P011 PAGE_COPY /* private -wr */
+#define __P100 PAGE_READONLY_EXEC /* private x-- */
+#define __P101 PAGE_READONLY_EXEC /* private x-r */
+#define __P110 PAGE_COPY_EXEC /* private xw- */
+#define __P111 PAGE_COPY_EXEC /* private xwr */
+
+#define __S000 PAGE_NONE /* shared --- */
+#define __S001 PAGE_READONLY /* shared --r */
+#define __S010 PAGE_SHARED /* shared -w- */
+#define __S011 PAGE_SHARED /* shared -wr */
+#define __S100 PAGE_READONLY_EXEC /* shared x-- */
+#define __S101 PAGE_READONLY_EXEC /* shared x-r */
+#define __S110 PAGE_SHARED_EXEC /* shared xw- */
+#define __S111 PAGE_SHARED_EXEC /* shared xwr */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern unsigned long empty_zero_page[1024];
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#ifdef CONFIG_MMU
+extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+extern void paging_init(void);
+#else
+# define swapper_pg_dir NULL
+static inline void paging_init(void) { }
+#endif
+
+/*
+ * The pmd contains the kernel virtual address of the pte page.
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
+
+/*
+ * pte status.
+ */
+# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
+#else
+# define pte_present(pte) \
+ (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
+ || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
+#endif
+#define pte_clear(mm,addr,ptep) \
+ do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
+
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte)
+ { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+ { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+ { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)
+ { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void update_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+}
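+/*
+ * The dhwb (data cache hit writeback) above matters because, with
+ * cache aliasing, page tables are mapped with _PAGE_CA_BYPASS (see
+ * _PAGE_DIRECTORY): the hardware TLB refill reads PTEs uncached, so
+ * an update sitting only in a dirty writeback cache line would be
+ * invisible to it.
+ */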
+
+struct mm_struct;
+
+static inline void
+set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void
+set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+}
+
+struct vm_area_struct;
+
+static inline int
+ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ update_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
+}
+
+/*
+ * Encode and decode a swap and file entry.
+ */
+#define SWP_TYPE_BITS 5
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 11)
+#define __swp_entry(type,offs) \
+ ((swp_entry_t){((type) << 6) | ((offs) << 11) | \
+ _PAGE_CA_INVALID | _PAGE_USER})
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
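+/*
+ * Worked example of the encoding above: __swp_entry(2, 7) yields
+ * (2 << 6) | (7 << 11) | _PAGE_CA_INVALID | _PAGE_USER == 0x389c.
+ * The low attribute bits keep such a PTE neither none (the index is
+ * always non-zero) nor present (the cache attribute is invalid).
+ */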
+
+#endif /* !defined (__ASSEMBLY__) */
+
+
+#ifdef __ASSEMBLY__
+
+/* The assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
+ * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long), and
+ * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long).
+ *
+ * Note: We require an additional temporary register which can be the same as
+ * the register that holds the address.
+ *
+ * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
+ *
+ */
+#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
+#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
+
+#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
+ _PGD_INDEX(tmp, adr); \
+ addx4 mm, tmp, mm
+
+#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
+ srli pmd, pmd, PAGE_SHIFT; \
+ slli pmd, pmd, PAGE_SHIFT; \
+ addx4 pmd, tmp, pmd
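+/*
+ * In _PTE_OFFSET the srli/slli pair clears the low PAGE_SHIFT bits of
+ * the pmd value (the assembly counterpart of the "& PAGE_MASK" in the
+ * C expression quoted above), and addx4 adds the PTE index scaled by
+ * the 4-byte PTE size.
+ */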
+
+#else
+
+#define kern_addr_valid(addr) (1)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t *ptep);
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
new file mode 100644
index 000000000..354ca942d
--- /dev/null
+++ b/arch/xtensa/include/asm/platform.h
@@ -0,0 +1,66 @@
+/*
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/types.h>
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the MMU is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_restart is called to restart the system.
+ */
+extern void platform_restart (void);
+
+/*
+ * platform_halt is called to stop the system and halt.
+ */
+extern void platform_halt (void);
+
+/*
+ * platform_power_off is called to stop the system and power it off.
+ */
+extern void platform_power_off (void);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_heartbeat is called on every timer tick, i.e. HZ times per second.
+ */
+extern void platform_heartbeat (void);
+
+/*
+ * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
+#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
new file mode 100644
index 000000000..9dd4efe1b
--- /dev/null
+++ b/arch/xtensa/include/asm/processor.h
@@ -0,0 +1,257 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#include <asm/core.h>
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/regs.h>
+
+/* Assertions. */
+
+#if (XCHAL_HAVE_WINDOWED != 1)
+# error Linux requires the Xtensa Windowed Registers Option.
+#endif
+
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#ifdef CONFIG_MMU
+#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
+#else
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
+#endif
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism. To do this, an otherwise
+ * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG 63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
+#else
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
+#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#ifndef __ASSEMBLY__
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
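+/*
+ * Worked example: for a call8 (winsize 2) returning to 0xd0001234,
+ * MAKE_RA_FOR_CALL(0xd0001234, 2) == 0x90001234; with a stack pointer
+ * in the same 1 GB region, e.g. 0xd0ffff00,
+ * MAKE_PC_FROM_RA(0x90001234, 0xd0ffff00) recovers 0xd0001234.
+ */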
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp. reg must be in the range [0..4).
+ */
+#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call8. reg must be in the range [4..8).
+ */
+#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call12. reg must be in the range [4..12).
+ */
+#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
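+/*
+ * Example: SPILL_SLOT(sp, 0) reads the caller's saved a0 from the
+ * 16-byte base save area just below sp, while the CALL8/CALL12
+ * variants reach the extra save areas 32 and 48 bytes below sp.
+ */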
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+
+ /* kernel's return address and stack pointer for context switching */
+ unsigned long ra; /* kernel's a0: return address and window call size */
+ unsigned long sp; /* kernel's a1: stack pointer */
+
+ mm_segment_t current_ds; /* see uaccess.h for example uses */
+
+ /* struct xtensa_cpuinfo info; */
+
+ unsigned long bad_vaddr; /* last user fault */
+ unsigned long bad_uaddr; /* last kernel fault accessing user space */
+ unsigned long error_code;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
+ struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
+#endif
+ /* Make structure 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+};
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define INIT_THREAD \
+{ \
+ ra: 0, \
+ sp: sizeof(init_stack) + (long) &init_stack, \
+ current_ds: {0}, \
+ /*info: {0}, */ \
+ bad_vaddr: 0, \
+ bad_uaddr: 0, \
+ error_code: 0, \
+}
+
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Note: When windowed ABI is used for userspace we set-up ps
+ * as if we did a call4 to the new pc.
+ * set_thread_state in signal.c depends on it.
+ */
+#if IS_ENABLED(CONFIG_USER_ABI_CALL0)
+#define USER_PS_VALUE ((USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+#else
+#define USER_PS_VALUE (PS_WOE_MASK | \
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+#endif
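+/*
+ * Decoded (windowed ABI case): PS.WOE = 1, PS.CALLINC = 1 (as after a
+ * call4), PS.RING = USER_RING, PS.UM = 1 and PS.EXCM = 1; the EXCM
+ * bit is then cleared by the rfe that enters user space.
+ */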
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+ do { \
+ unsigned long syscall = (regs)->syscall; \
+ memset((regs), 0, sizeof(*(regs))); \
+ (regs)->pc = (new_pc); \
+ (regs)->ps = USER_PS_VALUE; \
+ (regs)->areg[1] = (new_sp); \
+ (regs)->areg[0] = 0; \
+ (regs)->wmask = 1; \
+ (regs)->depc = 0; \
+ (regs)->windowbase = 0; \
+ (regs)->windowstart = 1; \
+ (regs)->syscall = syscall; \
+ } while (0)
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
+
+#define cpu_relax() barrier()
+
+/* Special register access. */
+
+#define xtensa_set_sr(x, sr) \
+ ({ \
+ __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \
+ "a"((unsigned int)(x))); \
+ })
+
+#define xtensa_get_sr(sr) \
+ ({ \
+ unsigned int v; \
+ __asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
+ v; \
+ })
+
+#if XCHAL_HAVE_EXTERN_REGS
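+/* rer/wer below read and write the Xtensa "external registers", a
+ * register space outside the core proper (used, for example, by the
+ * MX interrupt distributor on SMP configurations).
+ */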
+
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+ asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+ register unsigned long value;
+ asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+ return value;
+}
+
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_PROCESSOR_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
new file mode 100644
index 000000000..b109416dc
--- /dev/null
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,117 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+#include <asm/kmem_layout.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * exception :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#define NO_SYSCALL (-1)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/coprocessor.h>
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long pc; /* 4 */
+ unsigned long ps; /* 8 */
+ unsigned long depc; /* 12 */
+ unsigned long exccause; /* 16 */
+ unsigned long excvaddr; /* 20 */
+ unsigned long debugcause; /* 24 */
+ unsigned long wmask; /* 28 */
+ unsigned long lbeg; /* 32 */
+ unsigned long lend; /* 36 */
+ unsigned long lcount; /* 40 */
+ unsigned long sar; /* 44 */
+ unsigned long windowbase; /* 48 */
+ unsigned long windowstart; /* 52 */
+ unsigned long syscall; /* 56 */
+ unsigned long icountlevel; /* 60 */
+ unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
+
+ /* Additional configurable registers that are used by the compiler. */
+ xtregs_opt_t xtregs_opt;
+
+ /* Make sure the areg field is 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+
+ /* current register frame.
+ * Note: The ESF for kernel exceptions ends after 16 registers!
+ */
+ unsigned long areg[16];
+};
+
+#include <asm/core.h>
+
+# define arch_has_single_step() (1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020) != 0) /* PS.UM */
+# define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+ (regs)->areg[1]))
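+/*
+ * task_pt_regs(): the top (XCHAL_NUM_AREGS - 16) * 4 bytes of the
+ * kernel stack hold the spilled register file (the "register file"
+ * box in the diagram above), and struct pt_regs sits immediately
+ * below it, which is where task_pt_regs() points.
+ */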
+
+# ifndef CONFIG_SMP
+# define profile_pc(regs) instruction_pointer(regs)
+# else
+# define profile_pc(regs) \
+ ({ \
+ in_lock_functions(instruction_pointer(regs)) ? \
+ return_pointer(regs) : instruction_pointer(regs); \
+ })
+# endif
+
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->areg[2];
+}
+
+#else /* __ASSEMBLY__ */
+
+# include <asm/asm-offsets.h>
+#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
new file mode 100644
index 000000000..ce184e7de
--- /dev/null
+++ b/arch/xtensa/include/asm/regs.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef _XTENSA_REGS_H
+#define _XTENSA_REGS_H
+
+/* Special registers. */
+
+#define SREG_MR 32
+#define SREG_IBREAKENABLE 96
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244
+
+/* EXCCAUSE register fields */
+
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+
+#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
+#define EXCCAUSE_SYSTEM_CALL 1
+#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
+#define EXCCAUSE_LOAD_STORE_ERROR 3
+#define EXCCAUSE_LEVEL1_INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
+#define EXCCAUSE_SPECULATION 7
+#define EXCCAUSE_PRIVILEGED 8
+#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
+#define EXCCAUSE_ITLB_MISS 16
+#define EXCCAUSE_ITLB_MULTIHIT 17
+#define EXCCAUSE_ITLB_PRIVILEGE 18
+#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
+#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
+#define EXCCAUSE_DTLB_MISS 24
+#define EXCCAUSE_DTLB_MULTIHIT 25
+#define EXCCAUSE_DTLB_PRIVILEGE 26
+#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
+#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
+#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
+#define EXCCAUSE_COPROCESSOR0_DISABLED 32
+#define EXCCAUSE_COPROCESSOR1_DISABLED 33
+#define EXCCAUSE_COPROCESSOR2_DISABLED 34
+#define EXCCAUSE_COPROCESSOR3_DISABLED 35
+#define EXCCAUSE_COPROCESSOR4_DISABLED 36
+#define EXCCAUSE_COPROCESSOR5_DISABLED 37
+#define EXCCAUSE_COPROCESSOR6_DISABLED 38
+#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
+
+/* PS register fields. */
+
+#define PS_WOE_BIT 18
+#define PS_WOE_MASK 0x00040000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_OWB_SHIFT 8
+#define PS_OWB_WIDTH 4
+#define PS_OWB_MASK 0x00000F00
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_UM_BIT 5
+#define PS_EXCM_BIT 4
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
+#define PS_INTLEVEL_MASK 0x0000000F
+
+/* DBREAKCn register fields. */
+
+#define DBREAKC_MASK_BIT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOAD_BIT 30
+#define DBREAKC_LOAD_MASK 0x40000000
+#define DBREAKC_STOR_BIT 31
+#define DBREAKC_STOR_MASK 0x80000000
+
+/* DEBUGCAUSE register fields. */
+
+#define DEBUGCAUSE_DBNUM_MASK 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8 /* First bit of DBNUM field */
+#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
+#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
+
+#endif /* _XTENSA_REGS_H */
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
new file mode 100644
index 000000000..a8a249326
--- /dev/null
+++ b/arch/xtensa/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <platform/serial.h>
+
+#endif /* _XTENSA_SERIAL_H */
diff --git a/arch/xtensa/include/asm/shmparam.h b/arch/xtensa/include/asm/shmparam.h
new file mode 100644
index 000000000..c8cc16c3d
--- /dev/null
+++ b/arch/xtensa/include/asm/shmparam.h
@@ -0,0 +1,21 @@
+/*
+ * include/asm-xtensa/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_SHMPARAM_H
+#define _XTENSA_SHMPARAM_H
+
+/*
+ * Xtensa can have variable-size caches, and if the size of a single
+ * way is larger than the page size, then we have to start worrying
+ * about cache aliasing problems.
+ */
+
+#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
+
+#endif /* _XTENSA_SHMPARAM_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
new file mode 100644
index 000000000..de169b4ea
--- /dev/null
+++ b/arch/xtensa/include/asm/signal.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_SIGNAL_H
+#define _XTENSA_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#ifndef __ASSEMBLY__
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
new file mode 100644
index 000000000..4e43f5643
--- /dev/null
+++ b/arch/xtensa/include/asm/smp.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SMP_H
+#define _XTENSA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define cpu_logical_map(cpu) (cpu)
+
+struct start_info {
+ unsigned long stack;
+};
+extern struct start_info start_info;
+
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
+
+#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
new file mode 100644
index 000000000..584b0de6f
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/spinlock.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SPINLOCK_H
+#define _XTENSA_SPINLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
+
+#define smp_mb__after_spinlock() smp_mb()
+
+#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
new file mode 100644
index 000000000..64c938925
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+# error "please don't include this file directly"
+#endif
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000..e368f94fd
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 000000000..fe06e8ed1
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,44 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task || task == current)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
new file mode 100644
index 000000000..89b51a0c7
--- /dev/null
+++ b/arch/xtensa/include/asm/string.h
@@ -0,0 +1,140 @@
+/*
+ * include/asm-xtensa/string.h
+ *
+ * These trivial string functions are considered part of the public domain.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
+
+#ifndef _XTENSA_STRING_H
+#define _XTENSA_STRING_H
+
+#define __HAVE_ARCH_STRCPY
+static inline char *strcpy(char *__dest, const char *__src)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ __asm__ __volatile__("1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "bnez %2, 1b\n\t"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ if (__n == 0)
+ return __xdest;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "bne %1, %5, 1b\n"
+ "2:"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
+ : "memory");
+
+ return __xdest;
+}
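+/*
+ * Note: the loop above stops right after copying the terminating NUL
+ * (or after __n bytes) and, unlike ISO strncpy(), does not zero-pad
+ * the remainder of the destination buffer.
+ */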
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char *__cs, const char *__ct)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "mov %2, %3\n"
+ "1:\n\t"
+ "beq %0, %6, 2f\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beqz %3, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+/* Don't build bcopy at all ... */
+#define __HAVE_ARCH_BCOPY
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
+#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 000000000..6b73bf0eb
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/* switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last) \
+do { \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
new file mode 100644
index 000000000..f9a671cbf
--- /dev/null
+++ b/arch/xtensa/include/asm/syscall.h
@@ -0,0 +1,87 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+#include <uapi/linux/audit.h>
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_XTENSA;
+}
+
+typedef void (*syscall_t)(void);
+extern syscall_t sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* Do nothing. */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->areg[2]) ? regs->areg[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->areg[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->areg[2] = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 6
+#define XTENSA_SYSCALL_ARGUMENT_REGS {6, 3, 4, 5, 8, 9}
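+/*
+ * Per XTENSA_SYSCALL_ARGUMENT_REGS, the six syscall arguments arrive
+ * in a6, a3, a4, a5, a8 and a9, while the return value goes back in
+ * a2 (see regs_return_value() in asm/ptrace.h).
+ */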
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+ unsigned int i;
+
+ for (i = 0; i < SYSCALL_MAX_ARGS; ++i)
+ args[i] = regs->areg[reg[i]];
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+ unsigned int i;
+
+ for (i = 0; i < SYSCALL_MAX_ARGS; ++i)
+ regs->areg[reg[i]] = args[i];
+}
+
+asmlinkage long xtensa_rt_sigreturn(void);
+asmlinkage long xtensa_shmat(int, char __user *, int);
+asmlinkage long xtensa_fadvise64_64(int, int,
+ unsigned long long, unsigned long long);
+
+#endif
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644
index 000000000..552cdfd85
--- /dev/null
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -0,0 +1,19 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#include <linux/memblock.h>
+
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
new file mode 100644
index 000000000..a312333a9
--- /dev/null
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -0,0 +1,139 @@
+/*
+ * include/asm-xtensa/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_THREAD_INFO_H
+#define _XTENSA_THREAD_INFO_H
+
+#include <linux/stringify.h>
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+#ifndef __ASSEMBLY__
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct xtregs_coprocessor {
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+} xtregs_coprocessor_t;
+
+#endif
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable, < 0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space */
+
+ unsigned long cpenable;
+#if XCHAL_HAVE_EXCLUSIVE
+ /* result of the most recent exclusive store */
+ unsigned long atomctl8;
+#endif
+
+ /* Allocate storage for extra user states and coprocessor states. */
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t xtregs_cp;
+#endif
+ xtregs_user_t xtregs_user;
+};
+
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
+ "xor %0, a1, %0" : "=&r" (ti) : );
+ return ti;
+}
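+/*
+ * The extui/xor pair rounds a1 down to the start of the
+ * KERNEL_STACK_SIZE-aligned stack: extui extracts the low
+ * CURRENT_SHIFT bits of the stack pointer and the xor clears exactly
+ * those bits, yielding the stack base where struct thread_info lives.
+ */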
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg,sp) \
+ extui reg, sp, 0, CURRENT_SHIFT; \
+ xor reg, sp, reg
+#endif
+
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
+#define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
+#define TIF_SECCOMP 10 /* secure computing */
+#define TIF_MEMDIE 11 /* is terminating due to OOM killer */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+
+#define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+ _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
+
+#endif /* _XTENSA_THREAD_INFO_H */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
new file mode 100644
index 000000000..3f2462f2d
--- /dev/null
+++ b/arch/xtensa/include/asm/timex.h
@@ -0,0 +1,60 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TIMEX_H
+#define _XTENSA_TIMEX_H
+
+#include <asm/processor.h>
+
+#if XCHAL_NUM_TIMERS > 0 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 0
+# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 1 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 1
+# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 2 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 2
+# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
+#else
+# error "Bad timer number for Linux configurations!"
+#endif
+
+extern unsigned long ccount_freq;
+
+void local_timer_setup(unsigned cpu);
+
+/*
+ * Register access.
+ */
+
+static inline unsigned long get_ccount (void)
+{
+ return xtensa_get_sr(ccount);
+}
+
+static inline void set_ccount (unsigned long ccount)
+{
+ xtensa_set_sr(ccount, ccount);
+}
+
+static inline unsigned long get_linux_timer (void)
+{
+ return xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER);
+}
+
+static inline void set_linux_timer (unsigned long ccompare)
+{
+ xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+}
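+
+/*
+ * Illustrative use (not part of this header): the next tick is armed
+ * by programming CCOMPARE relative to the current cycle counter,
+ *
+ *	set_linux_timer(get_ccount() + delta);
+ *
+ * where 'delta' is a hypothetical cycle count until the next event;
+ * the timer interrupt is raised once CCOUNT reaches the CCOMPARE
+ * value.
+ */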
+
+#include <asm-generic/timex.h>
+
+#endif /* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
new file mode 100644
index 000000000..508899351
--- /dev/null
+++ b/arch/xtensa/include/asm/tlb.h
@@ -0,0 +1,21 @@
+/*
+ * include/asm-xtensa/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLB_H
+#define _XTENSA_TLB_H
+
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+
+#endif /* _XTENSA_TLB_H */
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
new file mode 100644
index 000000000..856e2da2e
--- /dev/null
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLBFLUSH_H
+#define _XTENSA_TLBFLUSH_H
+
+#include <linux/stringify.h>
+#include <asm/processor.h>
+
+#define DTLB_WAY_PGD 7
+
+#define ITLB_ARF_WAYS 4
+#define DTLB_ARF_WAYS 4
+
+#define ITLB_HIT_BIT 3
+#define DTLB_HIT_BIT 4
+
+#ifndef __ASSEMBLY__
+
+/* TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes a single page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
+ end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
+
+#endif /* CONFIG_SMP */
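+
+/*
+ * Illustrative call through the generic mm API: after changing the
+ * PTEs covering a VMA,
+ *
+ *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+ *
+ * resolves to the local or cross-CPU variant depending on CONFIG_SMP.
+ */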
+
+/* TLB operations. */
+
+static inline unsigned long itlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline unsigned long dtlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline void invalidate_itlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
+}
+
+static inline void invalidate_dtlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
+}
+
+/* Use the .._no_isync functions with caution. Generally, these are
+ * handy for bulk invalidates followed by a single 'isync'. The
+ * caller must follow up with an 'isync', which can be relatively
+ * expensive on some Xtensa implementations.
+ */
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
+}
+
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
+}
+
+static inline void set_itlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_dtlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_ptevaddr_register (unsigned long val)
+{
+ __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
+ : : "a" (val));
+}
+
+static inline unsigned long read_ptevaddr_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void write_dtlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void write_itlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("witlb %1, %0; isync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void invalidate_page_directory (void)
+{
+ invalidate_dtlb_entry (DTLB_WAY_PGD);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+1);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+2);
+}
+
+static inline void invalidate_itlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
+ invalidate_itlb_entry(tlb_entry);
+}
+
+static inline void invalidate_dtlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
+ invalidate_dtlb_entry(tlb_entry);
+}
+
+/*
+ * DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
+ * ISA and exist only for test purposes.
+ * They may be helpful for MMU debugging, however.
+ *
+ * 'at' is the unmodified input register
+ * 'as' is the output register, as follows (specific to the Linux config):
+ *
+ * as[31..12] contain the virtual address
+ * as[11..08] are meaningless
+ * as[07..00] contain the asid
+ */
+
+static inline unsigned long read_dtlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_dtlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000..f720a57d0
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,118 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
+ * handler must be either of the following:
+ * void (*)(struct pt_regs *regs);
+ * void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
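+
+/*
+ * Illustrative registration (do_illegal_instruction is a hypothetical
+ * handler with one of the signatures above):
+ *
+ *	void *old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION,
+ *				     do_illegal_instruction);
+ *
+ * The previously installed handler is returned so that it can be
+ * restored later.
+ */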
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
+void secondary_trap_init(void);
+
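+/*
+ * Spill all live register windows to their save areas on the kernel
+ * stack by walking a chain of nested entry frames; on return only the
+ * current window holds live state.
+ */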
+static inline void spill_registers(void)
+{
+#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+ " call8 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
+ " _entry a1, 48\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+ " _entry a1, 16\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
+#endif
+ " retw\n"
+#endif
+ "2:\n"
+ : : : "a8", "a9", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
+}
+
+struct debug_table {
+ /* Pointer to debug exception handler */
+ void (*debug_exception)(void);
+ /* Temporary register save area */
+ unsigned long debug_save[1];
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Save area for DBREAKC registers */
+ unsigned long dbreakc_save[XCHAL_NUM_DBREAK];
+ /* Saved ICOUNT register */
+ unsigned long icount_save;
+ /* Saved ICOUNTLEVEL register */
+ unsigned long icount_level_save;
+#endif
+};
+
+void debug_exception(void);
+
+#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
new file mode 100644
index 000000000..5c9fb8005
--- /dev/null
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -0,0 +1,320 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic accessing functions to the user memory
+ * space for the kernel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UACCESS_H
+#define _XTENSA_UACCESS_H
+
+#include <linux/prefetch.h>
+#include <asm/types.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should
+ * be performed or not. If get_fs() == USER_DS, checking is
+ * performed, with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are
+ * grossly misnamed.
+ */
+
+#define KERNEL_DS ((mm_segment_t) { 0 })
+#define USER_DS ((mm_segment_t) { 1 })
+
+#define get_fs() (current->thread.current_ds)
+#define set_fs(val) (current->thread.current_ds = (val))
+
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
+
+#define __kernel_ok (uaccess_kernel())
+#define __user_ok(addr, size) \
+ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))
+
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
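+
+/*
+ * Note that __user_ok() checks 'size <= TASK_SIZE' before computing
+ * 'TASK_SIZE - (size)', so the unsigned subtraction cannot wrap and
+ * 'addr + size' never needs to be formed at all.
+ */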
+
+/*
+ * These are the main single-value transfer routines. They
+ * automatically use the right size if we just have the right pointer
+ * type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in
+ * "get_user()" and yet we don't want to do any pointers, because that
+ * is too much of a performance impact. Thus we have a few rather ugly
+ * macros here, and hide all the ugliness from the user.
+ *
+ * Be careful not to
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
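+
+/*
+ * Typical use (illustrative; 'uaddr' is a hypothetical __user
+ * pointer):
+ *
+ *	int val;
+ *	if (get_user(val, (int __user *)uaddr))
+ *		return -EFAULT;
+ *	if (put_user(val + 1, (int __user *)uaddr))
+ *		return -EFAULT;
+ */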
+
+/*
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (access_ok(__pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
+ case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
+ case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
+ case 8: { \
+ __typeof__(*ptr) __v64 = x; \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
+ break; \
+ } \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * Consider a case of a user single load/store would cause both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the
+ * user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
+ * sync.
+ */
+
+#define __check_align_1 ""
+
+#define __check_align_2 \
+ " _bbci.l %[mem] * 0, 1f \n" \
+ " movi %[err], %[efault] \n" \
+ " _j 2f \n"
+
+#define __check_align_4 \
+ " _bbsi.l %[mem] * 0, 0f \n" \
+ " _bbci.l %[mem] * 0 + 1, 1f \n" \
+ "0: movi %[err], %[efault] \n" \
+ " _j 2f \n"
+
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[mem] \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+ :[x] "r"(x_), [efault] "i"(-EFAULT))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr)))0; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
+ case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
+ case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
+ case 8: { \
+ u64 __x; \
+ if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
+ retval = -EFAULT; \
+ (x) = (__typeof__(*(ptr)))0; \
+ } else { \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ } \
+ break; \
+ } \
+ default: \
+ (x) = (__typeof__(*(ptr)))0; \
+ __get_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do { \
+ u32 __x = 0; \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[mem] \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+ :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
+ (x_) = (__force __typeof__(*(addr_)))__x; \
+} while (0)
+
+
+/*
+ * Copy to/from user space
+ */
+
+extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ return __xtensa_copy_user((__force void *)to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * We need to return the number of bytes not cleared. Our memset()
+ * returns zero if a problem occurs while accessing user-space memory.
+ * In that event, report that nothing was cleared by returning the full
+ * size. Otherwise, return zero for success.
+ */
+
+static inline unsigned long
+__xtensa_clear_user(void __user *addr, unsigned long size)
+{
+ if (!__memset((void __force *)addr, 0, size))
+ return size;
+ return 0;
+}
+
+static inline unsigned long
+clear_user(void __user *addr, unsigned long size)
+{
+ if (access_ok(addr, size))
+ return __xtensa_clear_user(addr, size);
+ return size ? -EFAULT : 0;
+}
+
+#define __clear_user __xtensa_clear_user
+
+
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
+extern long __strncpy_user(char *dst, const char __user *src, long count);
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (access_ok(src, 1))
+ return __strncpy_user(dst, src, count);
+ return -EFAULT;
+}
+#else
+long strncpy_from_user(char *dst, const char __user *src, long count);
+#endif
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char __user *str, long len);
+
+static inline long strnlen_user(const char __user *str, long len)
+{
+ if (!access_ok(str, 1))
+ return 0;
+ return __strnlen_user(str, len);
+}
+
+#endif /* _XTENSA_UACCESS_H */
diff --git a/arch/xtensa/include/asm/ucontext.h b/arch/xtensa/include/asm/ucontext.h
new file mode 100644
index 000000000..94c94ed3e
--- /dev/null
+++ b/arch/xtensa/include/asm/ucontext.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/ucontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UCONTEXT_H
+#define _XTENSA_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _XTENSA_UCONTEXT_H */
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
new file mode 100644
index 000000000..8e7ed046b
--- /dev/null
+++ b/arch/xtensa/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Xtensa doesn't handle unaligned accesses efficiently.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
+
+#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
new file mode 100644
index 000000000..b52236245
--- /dev/null
+++ b/arch/xtensa/include/asm/unistd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_GETPGRP
+
+#define NR_syscalls __NR_syscalls
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000..704286c35
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,93 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 support. Deals with re-mapping the virtual
+ * memory addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <asm/core.h>
+#include <asm/kmem_layout.h>
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
+#else
+#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
+ XCHAL_KSEG_CACHED_VADDR - \
+ XCHAL_KSEG_PADDR)
+#endif
+#else
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+#endif
+
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#ifdef CONFIG_VECTORS_ADDR
+#define VECBASE_VADDR (CONFIG_VECTORS_ADDR)
+#else
+#define VECBASE_VADDR _vecbase
+#endif
+
+#if XCHAL_HAVE_VECBASE
+
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
+
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL7_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/include/asm/vermagic.h b/arch/xtensa/include/asm/vermagic.h
new file mode 100644
index 000000000..6d9c670e4
--- /dev/null
+++ b/arch/xtensa/include/asm/vermagic.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+#include <variant/core.h>
+
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/xtensa/include/asm/vmalloc.h b/arch/xtensa/include/asm/vmalloc.h
new file mode 100644
index 000000000..0eb94b70b
--- /dev/null
+++ b/arch/xtensa/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_XTENSA_VMALLOC_H
+#define _ASM_XTENSA_VMALLOC_H
+
+#endif /* _ASM_XTENSA_VMALLOC_H */
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..b97c552db
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += unistd_32.h
diff --git a/arch/xtensa/include/uapi/asm/auxvec.h b/arch/xtensa/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..257dec75c
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __XTENSA_AUXVEC_H
+#define __XTENSA_AUXVEC_H
+
+#endif
diff --git a/arch/xtensa/include/uapi/asm/byteorder.h b/arch/xtensa/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..5b9f832c2
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/byteorder.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _XTENSA_BYTEORDER_H
+#define _XTENSA_BYTEORDER_H
+
+#ifdef __XTENSA_EL__
+#include <linux/byteorder/little_endian.h>
+#elif defined(__XTENSA_EB__)
+#include <linux/byteorder/big_endian.h>
+#else
+# error processor byte order undefined!
+#endif
+
+#endif /* _XTENSA_BYTEORDER_H */
diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
new file mode 100644
index 000000000..6d4a87296
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ioctls.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ *
+ * Derived from "include/asm-i386/ioctls.h"
+ */
+
+#ifndef _XTENSA_IOCTLS_H
+#define _XTENSA_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+
+#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
+#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
+#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
+#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
+#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL _IO('T', 12)
+#define TIOCNXCL _IO('T', 13)
+#define TIOCSCTTY _IO('T', 14)
+
+#define TIOCSTI _IOW('T', 18, char)
+#define TIOCMGET _IOR('T', 21, unsigned int)
+#define TIOCMBIS _IOW('T', 22, unsigned int)
+#define TIOCMBIC _IOW('T', 23, unsigned int)
+#define TIOCMSET _IOW('T', 24, unsigned int)
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+
+#define TIOCGSOFTCAR _IOR('T', 25, unsigned int)
+#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
+#define TIOCLINUX _IOW('T', 28, char)
+#define TIOCCONS _IO('T', 29)
+#define TIOCGSERIAL 0x803C541E /*_IOR('T', 30, struct serial_struct)*/
+#define TIOCSSERIAL 0x403C541F /*_IOW('T', 31, struct serial_struct)*/
+#define TIOCPKT _IOW('T', 32, int)
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+# define TIOCPKT_IOCTL 64
+
+
+#define TIOCNOTTY _IO('T', 34)
+#define TIOCSETD _IOW('T', 35, int)
+#define TIOCGETD _IOR('T', 36, int)
+#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
+#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
+#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
+#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+#define TIOCGRS485 _IOR('T', 46, struct serial_rs485)
+#define TIOCSRS485 _IOWR('T', 47, struct serial_rs485)
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
+#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
+#define TIOCVHANGUP _IO('T', 0x37)
+#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
+
+#define TIOCSERCONFIG _IO('T', 83)
+#define TIOCSERGWILD _IOR('T', 84, int)
+#define TIOCSERSWILD _IOW('T', 85, int)
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
+ /* _IOR('T', 90, struct serial_multiport_struct) */
+#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
+ /* _IOW('T', 91, struct serial_multiport_struct) */
+
+#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#endif /* _XTENSA_IOCTLS_H */
diff --git a/arch/xtensa/include/uapi/asm/ipcbuf.h b/arch/xtensa/include/uapi/asm/ipcbuf.h
new file mode 100644
index 000000000..3bd0642f6
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ipcbuf.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ipcbuf.h
+ *
+ * The ipc64_perm structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IPCBUF_H
+#define _XTENSA_IPCBUF_H
+
+#include <linux/posix_types.h>
+
+/*
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned long seq;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _XTENSA_IPCBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
new file mode 100644
index 000000000..e5e643752
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/mman.h
+ *
+ * Xtensa Processor memory-manager definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMAN_H
+#define _XTENSA_MMAN_H
+
+/*
+ * Protections are chosen from these bits, OR'd together. The
+ * implementation does not necessarily support PROT_EXEC or PROT_WRITE
+ * without PROT_READ. The only guarantees are that no writing will be
+ * allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
+ */
+
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+
+#define PROT_SEM 0x10 /* page may be used for atomic ops */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+/*
+ * Flags for mmap
+ */
+/* 0x01 - 0x03 are defined in linux/mman.h */
+#define MAP_TYPE 0x00f /* Mask for type of mapping */
+#define MAP_FIXED 0x010 /* Interpret addr exactly */
+
+/* not used by linux, but here to make sure we don't clash with ABI defines */
+#define MAP_RENAME 0x020 /* Assign page to file */
+#define MAP_AUTOGROW 0x040 /* File may grow by writing */
+#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
+#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
+
+/* These are linux-specific */
+#define MAP_NORESERVE 0x0400 /* don't check for reservations */
+#define MAP_ANONYMOUS 0x0800 /* don't use a file */
+#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
+#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
+#define MAP_LOCKED 0x8000 /* pages are locked */
+#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+
+/*
+ * Flags for msync
+ */
+#define MS_ASYNC 0x0001 /* sync memory asynchronously */
+#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
+#define MS_SYNC 0x0004 /* synchronous memory sync */
+
+/*
+ * Flags for mlockall
+ */
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+
+/*
+ * Flags for mlock
+ */
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_FREE 8 /* free pages only if memory pressure */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
+#endif /* _XTENSA_MMAN_H */
diff --git a/arch/xtensa/include/uapi/asm/msgbuf.h b/arch/xtensa/include/uapi/asm/msgbuf.h
new file mode 100644
index 000000000..1ed2c85b6
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/msgbuf.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/msgbuf.h
+ *
+ * The msqid64_ds structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_MSGBUF_H
+#define _XTENSA_MSGBUF_H
+
+#include <asm/ipcbuf.h>
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifdef __XTENSA_EB__
+ unsigned long msg_stime_high;
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_ctime_high;
+ unsigned long msg_ctime; /* last change time */
+#elif defined(__XTENSA_EL__)
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_stime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_ctime; /* last change time */
+ unsigned long msg_ctime_high;
+#else
+# error processor byte order undefined!
+#endif
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _XTENSA_MSGBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/param.h b/arch/xtensa/include/uapi/asm/param.h
new file mode 100644
index 000000000..e6feb4ee0
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/param.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/param.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PARAM_H
+#define _UAPI_XTENSA_PARAM_H
+
+#ifndef __KERNEL__
+# define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _UAPI_XTENSA_PARAM_H */
diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
new file mode 100644
index 000000000..4d249040b
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/poll.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/poll.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POLL_H
+#define _XTENSA_POLL_H
+
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 0x0100
+#define POLLREMOVE 0x0800
+
+#include <asm-generic/poll.h>
+
+#endif /* _XTENSA_POLL_H */
diff --git a/arch/xtensa/include/uapi/asm/posix_types.h b/arch/xtensa/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000..1dc675928
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/posix_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/posix_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Largely copied from include/asm-ppc/posix_types.h
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POSIX_TYPES_H
+#define _XTENSA_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+#define __kernel_size_t __kernel_size_t
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..50db3e0a6
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PTRACE_H
+#define _UAPI_XTENSA_PTRACE_H
+
+#include <linux/types.h>
+
+/* Registers used by strace */
+
+#define REG_A_BASE 0x0000
+#define REG_AR_BASE 0x0100
+#define REG_PC 0x0020
+#define REG_PS 0x02e6
+#define REG_WB 0x0248
+#define REG_WS 0x0249
+#define REG_LBEG 0x0200
+#define REG_LEND 0x0201
+#define REG_LCOUNT 0x0202
+#define REG_SAR 0x0203
+
+#define SYSCALL_NR 0x00ff
+
+/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETXTREGS 18
+#define PTRACE_SETXTREGS 19
+#define PTRACE_GETHBPREGS 20
+#define PTRACE_SETHBPREGS 21
+
+#ifndef __ASSEMBLY__
+
+struct user_pt_regs {
+ __u32 pc;
+ __u32 ps;
+ __u32 lbeg;
+ __u32 lend;
+ __u32 lcount;
+ __u32 sar;
+ __u32 windowstart;
+ __u32 windowbase;
+ __u32 threadptr;
+ __u32 syscall;
+ __u32 reserved[6 + 48];
+ __u32 a[64];
+};
+
+#endif
+#endif /* _UAPI_XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/uapi/asm/sembuf.h b/arch/xtensa/include/uapi/asm/sembuf.h
new file mode 100644
index 000000000..3b9cdd406
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sembuf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sembuf.h
+ *
+ * The semid64_ds structure for Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ */
+
+#ifndef _XTENSA_SEMBUF_H
+#define _XTENSA_SEMBUF_H
+
+#include <asm/byteorder.h>
+#include <asm/ipcbuf.h>
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#ifdef __XTENSA_EL__
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_otime_high;
+ unsigned long sem_ctime; /* last change time */
+ unsigned long sem_ctime_high;
+#else
+ unsigned long sem_otime_high;
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_ctime_high;
+ unsigned long sem_ctime; /* last change time */
+#endif
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_XTENSA_SEMBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/setup.h b/arch/xtensa/include/uapi/asm/setup.h
new file mode 100644
index 000000000..5356a5fd4
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/setup.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/setup.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SETUP_H
+#define _XTENSA_SETUP_H
+
+#define COMMAND_LINE_SIZE 256
+
+#endif
diff --git a/arch/xtensa/include/uapi/asm/shmbuf.h b/arch/xtensa/include/uapi/asm/shmbuf.h
new file mode 100644
index 000000000..554a57a6a
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/shmbuf.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/shmbuf.h
+ *
+ * The shmid64_ds structure for Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space, but the padding is on the wrong
+ * side for big-endian xtensa, for historic reasons.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SHMBUF_H
+#define _XTENSA_SHMBUF_H
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ unsigned long shm_atime; /* last attach time */
+ unsigned long shm_atime_high;
+ unsigned long shm_dtime; /* last detach time */
+ unsigned long shm_dtime_high;
+ unsigned long shm_ctime; /* last change time */
+ unsigned long shm_ctime_high;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _XTENSA_SHMBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/sigcontext.h b/arch/xtensa/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..9cbf39e63
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sigcontext.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sigcontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SIGCONTEXT_H
+#define _XTENSA_SIGCONTEXT_H
+
+
+struct sigcontext {
+ unsigned long sc_pc;
+ unsigned long sc_ps;
+ unsigned long sc_lbeg;
+ unsigned long sc_lend;
+ unsigned long sc_lcount;
+ unsigned long sc_sar;
+ unsigned long sc_acclo;
+ unsigned long sc_acchi;
+ unsigned long sc_a[16];
+ void *sc_xtregs;
+};
+
+#endif /* _XTENSA_SIGCONTEXT_H */
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
new file mode 100644
index 000000000..005dec5bf
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_SIGNAL_H
+#define _UAPI_XTENSA_SIGNAL_H
+
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#endif
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/* #define SIGLOST 29 */
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/uapi/asm/sockios.h b/arch/xtensa/include/uapi/asm/sockios.h
new file mode 100644
index 000000000..1a1f58f4b
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sockios.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sockios.h
+ *
+ * Socket-level I/O control calls. Copied from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SOCKIOS_H
+#define _XTENSA_SOCKIOS_H
+
+#include <asm/ioctl.h>
+
+/* Socket-level I/O control calls. */
+
+#define FIOGETOWN _IOR('f', 123, int)
+#define FIOSETOWN _IOW('f', 124, int)
+
+#define SIOCATMARK _IOR('s', 7, int)
+#define SIOCSPGRP _IOW('s', 8, pid_t)
+#define SIOCGPGRP _IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
+
+#endif /* _XTENSA_SOCKIOS_H */
diff --git a/arch/xtensa/include/uapi/asm/stat.h b/arch/xtensa/include/uapi/asm/stat.h
new file mode 100644
index 000000000..7b4d90d6d
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/stat.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/stat.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_STAT_H
+#define _XTENSA_STAT_H
+
+#define STAT_HAVE_NSEC 1
+
+struct stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned long st_rdev;
+ long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct stat64 {
+ unsigned long long st_dev; /* Device */
+ unsigned long long st_ino; /* File serial number */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ long long st_size; /* Size of file, in bytes. */
+ unsigned long st_blksize; /* Optimal block size for I/O. */
+ unsigned long __unused2;
+ unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long st_atime; /* Time of last access. */
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime; /* Time of last modification. */
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime; /* Time of last status change. */
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _XTENSA_STAT_H */
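STAT_HAVE_NSEC advertises the st_*_nsec fields, so timestamps can be compared at nanosecond resolution. A sketch against the raw kernel layout above (note that glibc's struct stat spells these fields st_mtim.tv_nsec instead; mtime_cmp is a hypothetical helper name):

    /* compare mtimes with nanosecond precision; <0, 0, >0 like strcmp */
    int mtime_cmp(const struct stat *a, const struct stat *b)
    {
            if (a->st_mtime != b->st_mtime)
                    return a->st_mtime < b->st_mtime ? -1 : 1;
            if (a->st_mtime_nsec != b->st_mtime_nsec)
                    return a->st_mtime_nsec < b->st_mtime_nsec ? -1 : 1;
            return 0;
    }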
diff --git a/arch/xtensa/include/uapi/asm/swab.h b/arch/xtensa/include/uapi/asm/swab.h
new file mode 100644
index 000000000..e677cf4cc
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/swab.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/swab.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWAB_H
+#define _XTENSA_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __u32 res;
+ /* instruction sequence from Xtensa ISA release 2/2000 */
+ __asm__("ssai 8 \n\t"
+ "srli %0, %1, 16 \n\t"
+ "src %0, %0, %1 \n\t"
+ "src %0, %0, %0 \n\t"
+ "src %0, %1, %0 \n"
+ : "=&a" (res)
+ : "a" (x)
+ );
+ return res;
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ /* Given that 'short' values are signed (i.e., can be negative),
+ * we cannot assume that the upper 16 bits of the register are
+ * zero. We are careful to mask values after shifting.
+ */
+
+ /* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
+ * inserts an extui instruction after putting this function inline
+ * to ensure that it uses only the least-significant 16 bits of
+ * the result. xt-xcc doesn't use an extui, but assumes the
+ * __asm__ macro follows the convention that the upper 16 bits of an
+ * 'unsigned short' result are still zero. This macro doesn't
+ * follow that convention; indeed, it leaves garbage in the upper 16
+ * bits of the register.
+ *
+ * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
+ * types while the return type of the function is a 16-bit type
+ * forces both compilers to insert exactly one extui instruction
+ * (or equivalent) to mask off the upper 16 bits. */
+
+ __u32 res;
+ __u32 tmp;
+
+ __asm__("extui %1, %2, 8, 8\n\t"
+ "slli %0, %2, 8 \n\t"
+ "or %0, %0, %1 \n"
+ : "=&a" (res), "=&a" (tmp)
+ : "a" (x)
+ );
+
+ return res;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _XTENSA_SWAB_H */
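For reference, both asm sequences compute plain byte swaps. A portable C rendering of the same results (a sketch for illustration, not the kernel's generic fallback; the _ref names are hypothetical):

    #include <linux/types.h>

    /* what __arch_swab32 computes: reverse all four bytes */
    static inline __u32 swab32_ref(__u32 x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    /* what __arch_swab16 computes: exchange the two bytes; the 16-bit
     * return type supplies the masking discussed in the comment above */
    static inline __u16 swab16_ref(__u16 x)
    {
            return (__u16)((x << 8) | (x >> 8));
    }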
diff --git a/arch/xtensa/include/uapi/asm/termbits.h b/arch/xtensa/include/uapi/asm/termbits.h
new file mode 100644
index 000000000..d4206a7c5
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/termbits.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/termbits.h
+ *
+ * Copied from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TERMBITS_H
+#define _XTENSA_TERMBITS_H
+
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+#define EXTPROC 0200000
+
+/* tcflow() and TCXONC use these */
+
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _XTENSA_TERMBITS_H */
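Together these flags configure a tty; a common use is raw 8N1 serial I/O. A hedged user-space sketch using the libc termios wrappers (serial_raw_115200 is a hypothetical helper name):

    #include <termios.h>
    #include <unistd.h>

    int serial_raw_115200(int fd)
    {
            struct termios tio;

            if (tcgetattr(fd, &tio) < 0)
                    return -1;

            tio.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
                             INLCR | IGNCR | ICRNL | IXON);
            tio.c_oflag &= ~OPOST;                  /* raw output */
            tio.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
            tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);
            tio.c_cflag |= CS8 | CREAD | CLOCAL;    /* 8N1, ignore modem lines */

            cfsetispeed(&tio, B115200);
            cfsetospeed(&tio, B115200);

            tio.c_cc[VMIN] = 1;     /* block for at least one byte */
            tio.c_cc[VTIME] = 0;    /* no inter-byte timeout */

            return tcsetattr(fd, TCSANOW, &tio);
    }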
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
new file mode 100644
index 000000000..12db8ac38
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_TYPES_H
+#define _UAPI_XTENSA_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#ifdef __ASSEMBLY__
+# define __XTENSA_UL(x) (x)
+# define __XTENSA_UL_CONST(x) x
+#else
+# define __XTENSA_UL(x) ((unsigned long)(x))
+# define ___XTENSA_UL_CONST(x) x##UL
+# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#endif
+
+#endif /* _UAPI_XTENSA_TYPES_H */
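The two-level ___XTENSA_UL_CONST/__XTENSA_UL_CONST pattern exists so a macro argument is fully expanded before ##UL pastes the suffix (the suffix is dropped entirely for the assembler, which would reject it). A standalone sketch of the same idiom (BASE_RAW and the UL_CONST names are hypothetical):

    #include <stdio.h>

    #define BASE_RAW 0xd0000000             /* hypothetical, for illustration */

    #define UL_CONST_(x)    x##UL
    #define UL_CONST(x)     UL_CONST_(x)    /* expand BASE_RAW before pasting */

    int main(void)
    {
            /* single-level pasting would yield the token "BASE_RAWUL" */
            unsigned long base = UL_CONST(BASE_RAW);
            printf("%#lx\n", base);
            return 0;
    }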
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..e67ccdd11
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_XTENSA_UNISTD_H
+#define _UAPI_XTENSA_UNISTD_H
+
+#include <asm/unistd_32.h>
+
+#define __ARCH_WANT_SYS_OLDUMOUNT
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ */
+
+#define SYS_XTENSA_RESERVED 0 /* don't use this */
+#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
+#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
+#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
+#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
+#define SYS_XTENSA_COUNT 5 /* count */
+
+#endif /* _UAPI_XTENSA_UNISTD_H */
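User space reaches these operations through the sysxtensa syscall. A hedged sketch of the compare-and-swap variant, assuming __NR_xtensa is provided via <asm/unistd_32.h> and <sys/syscall.h> (xtensa_cas is a hypothetical wrapper name):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/unistd.h>

    /* compare-and-swap *ptr from oldval to newval via sysxtensa */
    static int xtensa_cas(int *ptr, int oldval, int newval)
    {
            return syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
                           ptr, oldval, newval);
    }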