author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/xtensa/include/asm
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/xtensa/include/asm')
-rw-r--r-- arch/xtensa/include/asm/Kbuild | 10
-rw-r--r-- arch/xtensa/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/xtensa/include/asm/asm-prototypes.h | 29
-rw-r--r-- arch/xtensa/include/asm/asm-uaccess.h | 84
-rw-r--r-- arch/xtensa/include/asm/asmmacro.h | 340
-rw-r--r-- arch/xtensa/include/asm/atomic.h | 269
-rw-r--r-- arch/xtensa/include/asm/barrier.h | 31
-rw-r--r-- arch/xtensa/include/asm/bitops.h | 218
-rw-r--r-- arch/xtensa/include/asm/bootparam.h | 50
-rw-r--r-- arch/xtensa/include/asm/cache.h | 40
-rw-r--r-- arch/xtensa/include/asm/cacheasm.h | 214
-rw-r--r-- arch/xtensa/include/asm/cacheflush.h | 187
-rw-r--r-- arch/xtensa/include/asm/checksum.h | 247
-rw-r--r-- arch/xtensa/include/asm/cmpxchg.h | 223
-rw-r--r-- arch/xtensa/include/asm/coprocessor.h | 155
-rw-r--r-- arch/xtensa/include/asm/core.h | 68
-rw-r--r-- arch/xtensa/include/asm/current.h | 40
-rw-r--r-- arch/xtensa/include/asm/delay.h | 75
-rw-r--r-- arch/xtensa/include/asm/dma.h | 55
-rw-r--r-- arch/xtensa/include/asm/elf.h | 196
-rw-r--r-- arch/xtensa/include/asm/fixmap.h | 38
-rw-r--r-- arch/xtensa/include/asm/flat.h | 19
-rw-r--r-- arch/xtensa/include/asm/ftrace.h | 31
-rw-r--r-- arch/xtensa/include/asm/futex.h | 167
-rw-r--r-- arch/xtensa/include/asm/highmem.h | 84
-rw-r--r-- arch/xtensa/include/asm/hw_breakpoint.h | 62
-rw-r--r-- arch/xtensa/include/asm/initialize_mmu.h | 245
-rw-r--r-- arch/xtensa/include/asm/io.h | 62
-rw-r--r-- arch/xtensa/include/asm/irq.h | 42
-rw-r--r-- arch/xtensa/include/asm/irqflags.h | 83
-rw-r--r-- arch/xtensa/include/asm/jump_label.h | 65
-rw-r--r-- arch/xtensa/include/asm/kasan.h | 39
-rw-r--r-- arch/xtensa/include/asm/kmem_layout.h | 104
-rw-r--r-- arch/xtensa/include/asm/linkage.h | 9
-rw-r--r-- arch/xtensa/include/asm/mmu.h | 22
-rw-r--r-- arch/xtensa/include/asm/mmu_context.h | 154
-rw-r--r-- arch/xtensa/include/asm/mtd-xip.h | 14
-rw-r--r-- arch/xtensa/include/asm/mxregs.h | 46
-rw-r--r-- arch/xtensa/include/asm/nommu_context.h | 10
-rw-r--r-- arch/xtensa/include/asm/page.h | 203
-rw-r--r-- arch/xtensa/include/asm/pci-bridge.h | 76
-rw-r--r-- arch/xtensa/include/asm/pci.h | 46
-rw-r--r-- arch/xtensa/include/asm/perf_event.h | 4
-rw-r--r-- arch/xtensa/include/asm/pgalloc.h | 67
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 429
-rw-r--r-- arch/xtensa/include/asm/platform.h | 46
-rw-r--r-- arch/xtensa/include/asm/processor.h | 269
-rw-r--r-- arch/xtensa/include/asm/ptrace.h | 119
-rw-r--r-- arch/xtensa/include/asm/regs.h | 118
-rw-r--r-- arch/xtensa/include/asm/seccomp.h | 11
-rw-r--r-- arch/xtensa/include/asm/sections.h | 45
-rw-r--r-- arch/xtensa/include/asm/serial.h | 18
-rw-r--r-- arch/xtensa/include/asm/shmparam.h | 21
-rw-r--r-- arch/xtensa/include/asm/signal.h | 23
-rw-r--r-- arch/xtensa/include/asm/smp.h | 44
-rw-r--r-- arch/xtensa/include/asm/spinlock.h | 20
-rw-r--r-- arch/xtensa/include/asm/spinlock_types.h | 12
-rw-r--r-- arch/xtensa/include/asm/stackprotector.h | 33
-rw-r--r-- arch/xtensa/include/asm/stacktrace.h | 44
-rw-r--r-- arch/xtensa/include/asm/string.h | 137
-rw-r--r-- arch/xtensa/include/asm/switch_to.h | 22
-rw-r--r-- arch/xtensa/include/asm/syscall.h | 76
-rw-r--r-- arch/xtensa/include/asm/sysmem.h | 19
-rw-r--r-- arch/xtensa/include/asm/thread_info.h | 145
-rw-r--r-- arch/xtensa/include/asm/timex.h | 60
-rw-r--r-- arch/xtensa/include/asm/tlb.h | 23
-rw-r--r-- arch/xtensa/include/asm/tlbflush.h | 205
-rw-r--r-- arch/xtensa/include/asm/traps.h | 145
-rw-r--r-- arch/xtensa/include/asm/uaccess.h | 295
-rw-r--r-- arch/xtensa/include/asm/ucontext.h | 22
-rw-r--r-- arch/xtensa/include/asm/unistd.h | 16
-rw-r--r-- arch/xtensa/include/asm/vectors.h | 93
-rw-r--r-- arch/xtensa/include/asm/vermagic.h | 17
-rw-r--r-- arch/xtensa/include/asm/vmalloc.h | 4
74 files changed, 6755 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
new file mode 100644
index 000000000..fa07c686c
--- /dev/null
+++ b/arch/xtensa/include/asm/Kbuild
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += syscall_table.h
+generic-y += extable.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
+generic-y += user.h
diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/asm-prototypes.h b/arch/xtensa/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..b0da61812
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PROTOTYPES_H
+#define __ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+/*
+ * gcc internal math functions
+ */
+long long __ashrdi3(long long, int);
+long long __ashldi3(long long, int);
+long long __bswapdi2(long long);
+int __bswapsi2(int);
+long long __lshrdi3(long long, int);
+int __divsi3(int, int);
+int __modsi3(int, int);
+int __mulsi3(int, int);
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+unsigned long long __umulsidi3(unsigned int, unsigned int);
+
+#endif /* __ASM_PROTOTYPES_H */
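
The helpers declared above are libgcc routines that the compiler emits calls to for 64-bit arithmetic, and for division on cores without hardware divide; declaring them here gives the assembly-exported symbols proper prototypes. As a sketch of what one of them computes (a portable C reference under the assumption of 32-bit registers, not the optimized Xtensa assembly the kernel actually links):

	/* Portable sketch of __ashldi3: 64-bit left shift composed from
	 * 32-bit halves. Valid for shift counts 0 <= n < 64. */
	long long ashldi3_sketch(long long v, int n)
	{
		unsigned int lo = (unsigned int)v;
		unsigned int hi = (unsigned int)((unsigned long long)v >> 32);

		if (n >= 32) {
			hi = lo << (n - 32);
			lo = 0;
		} else if (n > 0) {
			hi = (hi << n) | (lo >> (32 - n));
			lo <<= n;
		}
		return (long long)(((unsigned long long)hi << 32) | lo);
	}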
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
new file mode 100644
index 000000000..7cec86913
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -0,0 +1,84 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access checks to user memory for the
+ * kernel, in the form of the user_ok and access_ok assembler macros below.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASM_UACCESS_H
+#define _XTENSA_ASM_UACCESS_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C-macro version below for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at>	destroyed (actually, (TASK_SIZE - size))
+ */
+ .macro user_ok aa, as, at, error
+ movi \at, __XTENSA_UL_CONST(TASK_SIZE)
+ bgeu \as, \at, \error
+ sub \at, \at, \as
+ bgeu \aa, \at, \error
+ .endm
+
+/*
+ * access_ok determines whether a memory access is allowed. See the
+ * equivalent C-macro version below for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <sp>
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro access_ok aa, as, at, sp, error
+ user_ok \aa, \as, \at, \error
+.Laccess_ok_\@:
+ .endm
+
+#endif /* _XTENSA_ASM_UACCESS_H */
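
The range check that user_ok encodes is easier to see in C. A minimal sketch, derived by reading the two bgeu branches above (the function name is hypothetical; the real C-level check lives in the uaccess headers):

	/* Access to [addr, addr + size) is allowed iff the size fits below
	 * TASK_SIZE and the start address leaves room for it. */
	static inline int user_ok_sketch(unsigned long addr, unsigned long size)
	{
		return size < TASK_SIZE && addr < TASK_SIZE - size;
	}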
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
new file mode 100644
index 000000000..01bf7d9db
--- /dev/null
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,340 @@
+/*
+ * include/asm-xtensa/asmmacro.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASMMACRO_H
+#define _XTENSA_ASMMACRO_H
+
+#include <asm-generic/export.h>
+#include <asm/core.h>
+
+/*
+ * Some little helpers for loops. Use zero-overhead-loops
+ * where applicable and if supported by the processor.
+ *
+ * __loopi ar, at, size, inc
+ * ar register initialized with the start address
+ * at scratch register used by macro
+ * size size immediate value
+ * inc increment
+ *
+ * __loops	ar, as, at, incr_log2[, mask_log2][, cond][, ncond]
+ *	ar	register initialized with the start address
+ *	as	register initialized with the size
+ *	at	scratch register used by macro
+ *	incr_log2	increment [in log2]
+ * mask_log2 mask [in log2]
+ * cond true condition (used in loop'cond')
+ * ncond false condition (used in b'ncond')
+ *
+ * __loop as
+ * restart loop. 'as' register must not have been modified!
+ *
+ * __endla ar, as, incr
+ * ar start address (modified)
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
+ *	incr	increment
+ */
+
+/*
+ * loop for given size as immediate
+ */
+
+ .macro __loopi ar, at, size, incr
+
+#if XCHAL_HAVE_LOOPS
+ movi \at, ((\size + \incr - 1) / (\incr))
+ loop \at, 99f
+#else
+ addi \at, \ar, \size
+ 98:
+#endif
+
+ .endm
+
+/*
+ * loop for given size in register
+ */
+
+ .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
+
+#if XCHAL_HAVE_LOOPS
+ .ifgt \incr_log2 - 1
+ addi \at, \as, (1 << \incr_log2) - 1
+ .ifnc \mask_log2,
+ extui \at, \at, \incr_log2, \mask_log2
+ .else
+ srli \at, \at, \incr_log2
+ .endif
+ .endif
+ loop\cond \at, 99f
+#else
+ .ifnc \mask_log2,
+ extui \at, \as, \incr_log2, \mask_log2
+ .else
+ .ifnc \ncond,
+ srli \at, \as, \incr_log2
+ .endif
+ .endif
+ .ifnc \ncond,
+ b\ncond \at, 99f
+
+ .endif
+ .ifnc \mask_log2,
+ slli \at, \at, \incr_log2
+ add \at, \ar, \at
+ .else
+ add \at, \ar, \as
+ .endif
+#endif
+ 98:
+
+ .endm
+
+/*
+ * loop from ar to as
+ */
+
+ .macro __loopt ar, as, at, incr_log2
+
+#if XCHAL_HAVE_LOOPS
+ sub \at, \as, \ar
+ .ifgt \incr_log2 - 1
+ addi \at, \at, (1 << \incr_log2) - 1
+ srli \at, \at, \incr_log2
+ .endif
+ loop \at, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * restart loop. registers must be unchanged
+ */
+
+ .macro __loop as
+
+#if XCHAL_HAVE_LOOPS
+ loop \as, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * end of loop with no increment of the address.
+ */
+
+ .macro __endl ar, as
+#if !XCHAL_HAVE_LOOPS
+ bltu \ar, \as, 98b
+#endif
+ 99:
+ .endm
+
+/*
+ * end of loop with increment of the address.
+ */
+
+ .macro __endla ar, as, incr
+ addi \ar, \ar, \incr
+ __endl \ar \as
+ .endm
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
+ * word starting at r from two registers loaded from consecutive aligned
+ * addresses covering r regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
+
+ .macro do_nsau cnt, val, tmp, a
+#if XCHAL_HAVE_NSA
+ nsau \cnt, \val
+#else
+ mov \a, \val
+ movi \cnt, 0
+ extui \tmp, \a, 16, 16
+ bnez \tmp, 0f
+ movi \cnt, 16
+ slli \a, \a, 16
+0:
+ extui \tmp, \a, 24, 8
+ bnez \tmp, 1f
+ addi \cnt, \cnt, 8
+ slli \a, \a, 8
+1:
+ movi \tmp, __nsau_data
+ extui \a, \a, 24, 8
+ add \tmp, \tmp, \a
+ l8ui \tmp, \tmp, 0
+ add \cnt, \cnt, \tmp
+#endif /* !XCHAL_HAVE_NSA */
+ .endm
+
+ .macro do_abs dst, src, tmp
+#if XCHAL_HAVE_ABS
+ abs \dst, \src
+#else
+ neg \tmp, \src
+ movgez \tmp, \src, \src
+ mov \dst, \tmp
+#endif
+ .endm
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+
+/* Assembly instructions for windowed kernel ABI. */
+#define KABI_W
+/* Assembly instructions for call0 kernel ABI (will be ignored). */
+#define KABI_C0 #
+
+#define XTENSA_FRAME_SIZE_RESERVE 16
+#define XTENSA_SPILL_STACK_RESERVE 32
+
+#define abi_entry(frame_size) \
+ entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
+ (((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT))
+#define abi_entry_default abi_entry(0)
+
+#define abi_ret(frame_size) retw
+#define abi_ret_default retw
+
+ /* direct call */
+#define abi_call call4
+ /* indirect call */
+#define abi_callx callx4
+ /* outgoing call argument registers */
+#define abi_arg0 a6
+#define abi_arg1 a7
+#define abi_arg2 a8
+#define abi_arg3 a9
+#define abi_arg4 a10
+#define abi_arg5 a11
+ /* return value */
+#define abi_rv a6
+ /* registers preserved across call */
+#define abi_saved0 a2
+#define abi_saved1 a3
+
+ /* none of the above */
+#define abi_tmp0 a4
+#define abi_tmp1 a5
+
+#elif defined(__XTENSA_CALL0_ABI__)
+
+/* Assembly instructions for windowed kernel ABI (will be ignored). */
+#define KABI_W #
+/* Assembly instructions for call0 kernel ABI. */
+#define KABI_C0
+
+#define XTENSA_SPILL_STACK_RESERVE 0
+
+#define abi_entry(frame_size) __abi_entry (frame_size)
+
+ .macro __abi_entry frame_size
+ .ifgt \frame_size
+ addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT)
+ .endif
+ .endm
+
+#define abi_entry_default
+
+#define abi_ret(frame_size) __abi_ret (frame_size)
+
+ .macro __abi_ret frame_size
+ .ifgt \frame_size
+ addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+ -XTENSA_STACK_ALIGNMENT)
+ .endif
+ ret
+ .endm
+
+#define abi_ret_default ret
+
+ /* direct call */
+#define abi_call call0
+ /* indirect call */
+#define abi_callx callx0
+ /* outgoing call argument registers */
+#define abi_arg0 a2
+#define abi_arg1 a3
+#define abi_arg2 a4
+#define abi_arg3 a5
+#define abi_arg4 a6
+#define abi_arg5 a7
+ /* return value */
+#define abi_rv a2
+ /* registers preserved across call */
+#define abi_saved0 a12
+#define abi_saved1 a13
+
+ /* none of the above */
+#define abi_tmp0 a8
+#define abi_tmp1 a9
+
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+#if defined(USER_SUPPORT_WINDOWED)
+/* Assembly instructions for windowed user ABI. */
+#define UABI_W
+/* Assembly instructions for call0 user ABI (will be ignored). */
+#define UABI_C0 #
+#else
+/* Assembly instructions for windowed user ABI (will be ignored). */
+#define UABI_W #
+/* Assembly instructions for call0 user ABI. */
+#define UABI_C0
+#endif
+
+#define __XTENSA_HANDLER .section ".exception.text", "ax"
+
+#endif /* _XTENSA_ASMMACRO_H */
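
Both abi_entry flavors above round the requested frame size up to XTENSA_STACK_ALIGNMENT with the standard power-of-two align-up idiom; note that -align equals ~(align - 1) in two's complement, which is why the macros can mask with -XTENSA_STACK_ALIGNMENT. In C, as a sketch assuming align is a power of two:

	/* Round size up to the next multiple of a power-of-two alignment. */
	static inline unsigned long align_up(unsigned long size, unsigned long align)
	{
		return (size + align - 1) & ~(align - 1);
	}
	/* align_up(20, 16) == 32; align_up(16, 16) == 16 */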
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
new file mode 100644
index 000000000..7308b7f77
--- /dev/null
+++ b/arch/xtensa/include/asm/atomic.h
@@ -0,0 +1,269 @@
+/*
+ * include/asm-xtensa/atomic.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ATOMIC_H
+#define _XTENSA_ATOMIC_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * This Xtensa implementation assumes that the right mechanism
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
+ *
+ * Locking interrupts looks like this:
+ *
+ * rsil a14, TOPLEVEL
+ * <code>
+ * wsr a14, PS
+ * rsync
+ *
+ * Note that a14 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * must not occur between the rsil and wsr instructions. By using
+ * a14 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return tmp; \
+}
+
+#elif XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a14, "__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
+ " wsr a14, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a14", "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a14,"__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
+ " wsr a14, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a14", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned int tmp, vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a14,"__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[tmp], %[result], %[i]\n" \
+ " s32i %[tmp], %[mem]\n" \
+ " wsr a14, ps\n" \
+ " rsync\n" \
+ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
+ : "a14", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif /* _XTENSA_ATOMIC_H */
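
Every ATOMIC_OP variant above is the same load/modify/conditional-store retry loop, expressed in three hardware dialects. A rough C rendering of the s32c1i flavor, using a GCC atomic builtin purely for illustration (the kernel uses the inline assembly, never this sketch):

	/* atomic add as a compare-and-swap retry loop */
	static inline void atomic_add_sketch(int i, volatile int *counter)
	{
		int old, new_val;

		do {
			old = *counter;      /* l32i + wsr scompare1 */
			new_val = old + i;   /* the substituted 'op' insn */
		} while (!__sync_bool_compare_and_swap(counter, old, new_val));
		                             /* s32c1i + bne, retry on race */
	}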
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 000000000..898ea397e
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,31 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#include <asm/core.h>
+
+#define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define __rmb() barrier()
+#define __wmb() __mb()
+
+#ifdef CONFIG_SMP
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
+#endif
+
+#if XCHAL_HAVE_S32C1I
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
new file mode 100644
index 000000000..e02ec5833
--- /dev/null
+++ b/arch/xtensa/include/asm/bitops.h
@@ -0,0 +1,218 @@
+/*
+ * include/asm-xtensa/bitops.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting etc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BITOPS_H
+#define _XTENSA_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/processor.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#if XCHAL_HAVE_NSA
+
+static inline unsigned long __cntlz (unsigned long x)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
+ return lz;
+}
+
+/*
+ * ffz: Find first zero in word. Undefined if no zero exists.
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+static inline int ffz(unsigned long x)
+{
+ return 31 - __cntlz(~x & -~x);
+}
+
+/*
+ * __ffs: Find first bit set in word. Return 0 for bit 0
+ */
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return 31 - __cntlz(x & -x);
+}
+
+/*
+ * ffs: Find first bit set in word. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static inline int ffs(unsigned long x)
+{
+ return 32 - __cntlz(x & -x);
+}
+
+/*
+ * fls: Find last (most-significant) bit set in word.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls (unsigned int x)
+{
+ return 32 - __cntlz(x);
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return 31 - __cntlz(word);
+}
+#else
+
+/* Use the generic implementation if we don't have the nsa/nsau instructions. */
+
+# include <asm-generic/bitops/ffs.h>
+# include <asm-generic/bitops/__ffs.h>
+# include <asm-generic/bitops/ffz.h>
+# include <asm-generic/bitops/fls.h>
+# include <asm-generic/bitops/__fls.h>
+
+#endif
+
+#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_EXCLUSIVE
+
+#define BIT_OP(op, insn, inv) \
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " "insn" %[tmp], %[tmp], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[value], %[addr]\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+ \
+ return value & mask; \
+}
+
+#elif XCHAL_HAVE_S32C1I
+
+#define BIT_OP(op, insn, inv) \
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+ \
+ return tmp & mask; \
+}
+
+#else
+
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define BIT_OPS(op, insn, inv) \
+ BIT_OP(op, insn, inv) \
+ TEST_AND_BIT_OP(op, insn, inv)
+
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
+
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
+
+#include <asm-generic/bitops/instrumented-atomic.h>
+
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* _XTENSA_BITOPS_H */
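
All the NSA-based helpers above lean on the same two's-complement identity: x & -x isolates the lowest set bit, and counting its leading zeros gives that bit's position. A portable sketch (assuming 32-bit unsigned long and, as documented above, x != 0):

	/* __ffs via count-leading-zeros, mirroring the nsau path */
	static inline unsigned long ffs_sketch(unsigned long x)
	{
		return 31 - __builtin_clz(x & -x);
	}
	/* ffs_sketch(0x18) == 3: 0x18 & -0x18 == 0x08, clz(0x08) == 28 */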
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
new file mode 100644
index 000000000..6333bd1eb
--- /dev/null
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/bootparam.h
+ *
+ * Definition of the Linux/Xtensa boot parameter structure
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * (Concept borrowed from the 68K port)
+ */
+
+#ifndef _XTENSA_BOOTPARAM_H
+#define _XTENSA_BOOTPARAM_H
+
+#define BP_VERSION 0x0001
+
+#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
+#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
+#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
+#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
+
+#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
+#define BP_TAG_LAST 0x7E0B /* last tag */
+
+#ifndef __ASSEMBLY__
+
+/* All records are aligned to 4 bytes */
+
+typedef struct bp_tag {
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure*/
+ unsigned long data[]; /* data */
+} bp_tag_t;
+
+struct bp_meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+};
+
+#define MEMORY_TYPE_CONVENTIONAL 0x1000
+#define MEMORY_TYPE_NONE 0x2000
+
+#endif
+#endif
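
Boot tags are laid out back to back and terminated by BP_TAG_LAST, with each record's payload length in 'size'. A hypothetical walker, sketched from the declarations above (the 4-byte rounding of 'size' is an assumption taken from the alignment comment, not from parser code in this diff):

	static void walk_bp_tags(const bp_tag_t *tag)
	{
		while (tag->id != BP_TAG_LAST) {
			/* dispatch on tag->id, e.g. BP_TAG_COMMAND_LINE */
			tag = (const bp_tag_t *)((const char *)(tag + 1) +
						 ((tag->size + 3) & ~3));
		}
	}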
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
new file mode 100644
index 000000000..54e147ac2
--- /dev/null
+++ b/arch/xtensa/include/asm/cache.h
@@ -0,0 +1,40 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <asm/core.h>
+
+#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
+#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
+#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * R/O after init is actually writable here: it cannot go to .rodata
+ * according to the vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
+#endif /* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
new file mode 100644
index 000000000..34545ecfd
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -0,0 +1,214 @@
+/*
+ * include/asm-xtensa/cacheasm.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+#include <asm/cache.h>
+#include <asm/asmmacro.h>
+#include <linux/stringify.h>
+
+/*
+ * Define cache functions as macros here so that they can be used
+ * by the kernel and boot loader. We should consider moving them to a
+ * library that can be linked by both.
+ *
+ * Locking
+ *
+ * ___unlock_dcache_all
+ * ___unlock_icache_all
+ *
+ * Flushing and invalidating
+ *
+ * ___flush_invalidate_dcache_{all|range|page}
+ * ___flush_dcache_{all|range|page}
+ * ___invalidate_dcache_{all|range|page}
+ * ___invalidate_icache_{all|range|page}
+ *
+ */
+
+
+ .macro __loop_cache_unroll ar at insn size line_width max_immed
+
+ .if (1 << (\line_width)) > (\max_immed)
+ .set _reps, 1
+ .elseif (2 << (\line_width)) > (\max_immed)
+ .set _reps, 2
+ .else
+ .set _reps, 4
+ .endif
+
+ __loopi \ar, \at, \size, (_reps << (\line_width))
+ .set _index, 0
+ .rep _reps
+ \insn \ar, _index << (\line_width)
+ .set _index, _index + 1
+ .endr
+ __endla \ar, \at, _reps << (\line_width)
+
+ .endm
+
+
+ .macro __loop_cache_all ar at insn size line_width max_immed
+
+ movi \ar, 0
+ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+
+ .macro __loop_cache_range ar as at insn line_width
+
+ extui \at, \ar, 0, \line_width
+ add \as, \as, \at
+
+ __loops \ar, \as, \at, \line_width
+ \insn \ar, 0
+ __endla \ar, \at, (1 << (\line_width))
+
+ .endm
+
+
+ .macro __loop_cache_page ar at insn line_width max_immed
+
+ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+
+ .macro ___unlock_dcache_all ar at
+
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___unlock_icache_all ar at
+
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_all ar at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_range ar as at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_page ar as
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
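
The unroll factor that __loop_cache_unroll selects is chosen so the largest unrolled offset still fits in the cache instruction's immediate field (max_immed). The .if/.elseif chain, restated as a C sketch:

	static int unroll_reps(int line_width, int max_immed)
	{
		if ((1 << line_width) > max_immed)
			return 1;
		if ((2 << line_width) > max_immed)
			return 2;
		return 4;
	}
	/* e.g. 32-byte lines (line_width 5) and max_immed 240 give 4 cache
	 * ops per iteration, at offsets 0, 32, 64 and 96. */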
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
new file mode 100644
index 000000000..785a00ce8
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -0,0 +1,187 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHEFLUSH_H
+#define _XTENSA_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Low-level routines for cache flushing.
+ *
+ * invalidate data or instruction cache:
+ *
+ * __invalidate_icache_all()
+ * __invalidate_icache_page(adr)
+ * __invalidate_dcache_page(adr)
+ * __invalidate_icache_range(from,size)
+ * __invalidate_dcache_range(from,size)
+ *
+ * flush data cache:
+ *
+ * __flush_dcache_page(adr)
+ *
+ * flush and invalidate data cache:
+ *
+ * __flush_invalidate_dcache_all()
+ * __flush_invalidate_dcache_page(adr)
+ * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
+ */
+
+extern void __invalidate_dcache_all(void);
+extern void __invalidate_icache_all(void);
+extern void __invalidate_dcache_page(unsigned long);
+extern void __invalidate_icache_page(unsigned long);
+extern void __invalidate_icache_range(unsigned long, unsigned long);
+extern void __invalidate_dcache_range(unsigned long, unsigned long);
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
+extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
+extern void __flush_invalidate_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
+#else
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
+#endif
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __invalidate_icache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+
+/*
+ * We have physically tagged caches - nothing to do here -
+ * unless we have cache aliasing.
+ *
+ * Pages can get remapped. Because this might change the 'color' of that page,
+ * we have to flush the cache before the PTE is changed.
+ * (see also Documentation/core-api/cachetlb.rst)
+ */
+
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
+
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
+
+#define local_flush_cache_all() \
+ do { \
+ __flush_invalidate_dcache_all(); \
+ __invalidate_icache_all(); \
+ } while (0)
+
+#define flush_cache_mm(mm) flush_cache_all()
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vunmap(start,end) flush_cache_all()
+
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
+
+#else
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+
+#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+
+#endif
+
+#define flush_icache_user_range flush_icache_range
+
+/* Ensure consistency between data and instruction cache. */
+#define local_flush_icache_range(start, end) \
+ do { \
+ __flush_dcache_range(start, (end) - (start)); \
+ __invalidate_icache_range(start,(end) - (start)); \
+ } while (0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ __flush_dcache_range((unsigned long) dst, len); \
+ __invalidate_icache_range((unsigned long) dst, len); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif
+
+#endif /* _XTENSA_CACHEFLUSH_H */
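
local_flush_icache_range above is the standard recipe for making freshly written instructions visible on a core whose instruction and data caches are not coherent. A hedged usage sketch (the helper name is hypothetical; both callees take a start address and a size, exactly as the macro above passes them):

	/* After writing code through the dcache (e.g. a trampoline),
	 * write the lines back and drop any stale icache copies. */
	static void sync_new_code(void *buf, unsigned long len)
	{
		unsigned long start = (unsigned long)buf;

		__flush_dcache_range(start, len);
		__invalidate_icache_range(start, len);
	}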
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
new file mode 100644
index 000000000..44ec1d0b2
--- /dev/null
+++ b/arch/xtensa/include/asm/checksum.h
@@ -0,0 +1,247 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+#include <asm/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+
+#define _HAVE_ARCH_CSUM_AND_COPY
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ return csum_partial_copy_generic(src, dst, len);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len)
+{
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static __inline__ __sum16 csum_fold(__wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("extui %1, %0, 16, 16\n\t"
+ "extui %0 ,%0, 0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "slli %1, %0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "extui %0, %0, 16, 16\n\t"
+ "neg %0, %0\n\t"
+ "addi %0, %0, -1\n\t"
+ "extui %0, %0, 0, 16\n\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "0" (sum));
+ return (__force __sum16)sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum, tmp, endaddr;
+
+ __asm__ __volatile__(
+ "sub %0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+ "loopgtz %2, 2f\n\t"
+#else
+ "beqz %2, 2f\n\t"
+ "slli %4, %2, 2\n\t"
+ "add %4, %4, %1\n\t"
+ "0:\t"
+#endif
+ "l32i %3, %1, 0\n\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "addi %1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+ "blt %1, %4, 0b\n\t"
+#endif
+ "2:\t"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+
+#ifdef __XTENSA_EL__
+ unsigned long len_proto = (len + proto) << 8;
+#elif defined(__XTENSA_EB__)
+ unsigned long len_proto = len + proto;
+#else
+# error processor byte order undefined!
+#endif
+ __asm__("add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %2\n\t"
+ "bgeu %0, %2, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=r" (len_proto)
+ : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("l32i %1, %2, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %4\n\t"
+ "bgeu %0, %4, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %5\n\t"
+ "bgeu %0, %5, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len)
+{
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic(src, (__force void *)dst, len);
+}
+#endif
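
csum_fold above compresses the 32-bit one's-complement accumulator into 16 bits with an end-around carry and then complements it; the neg/addi -1 pair in the assembly is just a two-instruction ~sum. The equivalent C, as a sketch:

	static inline unsigned short csum_fold_sketch(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);  /* add the two halves */
		sum = (sum & 0xffff) + (sum >> 16);  /* absorb the carry */
		return (unsigned short)~sum;
	}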
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644
index 000000000..675a11ea8
--- /dev/null
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -0,0 +1,223 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %[result], %[addr]\n"
+ " bne %[result], %[cmp], 2f\n"
+ " mov %[tmp], %[new]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ "2:\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %[cmp], scompare1\n"
+ " s32c1i %[new], %[mem]\n"
+ : [new] "+a" (new), [mem] "+m" (*p)
+ : [cmp] "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a14, "__stringify(TOPLEVEL)"\n"
+ " l32i %[old], %[mem]\n"
+ " bne %[old], %[cmp], 1f\n"
+ " s32i %[new], %[mem]\n"
+ "1:\n"
+ " wsr a14, ps\n"
+ " rsync\n"
+ : [old] "=&a" (old), [mem] "+m" (*p)
+ : [cmp] "a" (old), [new] "r" (new)
+ : "a14", "memory");
+ return old;
+#endif
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4: return __cmpxchg_u32(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
+ return old;
+ }
+}
+
+#define arch_cmpxchg(ptr,o,n) \
+ ({ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof (*(ptr))); \
+ })
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __generic_cmpxchg_local(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a14 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * must not occur between the rsil and wsr instructions. By using
+ * a14 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %[result], %[addr]\n"
+ " mov %[tmp], %[val]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [val] "a" (val), [addr] "a" (m)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %[tmp], %[mem]\n"
+ " mov %[result], %[val]\n"
+ " wsr %[tmp], scompare1\n"
+ " s32c1i %[result], %[mem]\n"
+ " bne %[result], %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp),
+ [mem] "+m" (*m)
+ : [val] "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a14, "__stringify(TOPLEVEL)"\n"
+ " l32i %[tmp], %[mem]\n"
+ " s32i %[val], %[mem]\n"
+ " wsr a14, ps\n"
+ " rsync\n"
+ : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+ : [val] "a" (val)
+ : "a14", "memory");
+ return tmp;
+#endif
+}
+
+#define arch_xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+ int bitoff = off * BITS_PER_BYTE;
+#endif
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+ u32 oldv, newv;
+ u32 ret;
+
+ do {
+ oldv = READ_ONCE(*p);
+ ret = (oldv & bitmask) >> bitoff;
+ newv = (oldv & ~bitmask) | (x << bitoff);
+ } while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+ return ret;
+}
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__arch_xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ return xchg_small(ptr, x, 1);
+ case 2:
+ return xchg_small(ptr, x, 2);
+ case 4:
+ return xchg_u32(ptr, x);
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
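
xchg_small above emulates byte and halfword exchange by splicing the new value into its lane of the containing 32-bit word and retrying with __cmpxchg_u32. The lane arithmetic in isolation, as a worked C sketch (hypothetical helper, little-endian example values):

	/* Splice one byte into a word at bit offset 'bitoff': the
	 * (oldv & ~bitmask) | (x << bitoff) step of xchg_small. */
	static unsigned int splice_byte(unsigned int word, unsigned int x,
					unsigned int bitoff)
	{
		unsigned int mask = 0xffu << bitoff;

		return (word & ~mask) | ((x & 0xffu) << bitoff);
	}
	/* splice_byte(0x11223344, 0xaa, 16) == 0x11aa3344 */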
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
new file mode 100644
index 000000000..3b1a0d5d2
--- /dev/null
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -0,0 +1,155 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <variant/tie.h>
+#include <asm/core.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <variant/tie-asm.h>
+
+.macro xchal_sa_start a b
+ .set .Lxchal_pofs_, 0
+ .set .Lxchal_ofs_, 0
+.endm
+
+.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+ .set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_CC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_NOCC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
+ */
+
+#define XTENSA_HAVE_COPROCESSOR(x) \
+ ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS \
+ (XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x) \
+ (XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS \
+ XCHAL_CP_PORT_MASK
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ * ext: extra registers that are used by the compiler
+ * cpn: optional registers that can be used by a user application
+ * cpX: coprocessor registers that can only be used if the corresponding
+ * CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
+ __REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name) __REG0_1 ## abi (name)
+#define __REG0_10(name) __u32 name;
+#define __REG0_11(name) __u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name) __u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a) __u32 name;
+#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+ __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+ __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+ __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+ __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+ __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+ __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+ __attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+ __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+struct thread_info;
+void coprocessor_flush(struct thread_info *ti, int cp_index);
+void coprocessor_release_all(struct thread_info *ti);
+void coprocessor_flush_all(struct thread_info *ti);
+void coprocessor_flush_release_all(struct thread_info *ti);
+void local_coprocessors_flush_release_all(void);
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _XTENSA_COPROCESSOR_H */
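
The coprocessor-mask macros above work because XCHAL_CP_PORT_MASK is a subset of XCHAL_CP_MASK, so XORing the two strips the io-port bits and leaves only real coprocessors. A standalone illustration with invented mask values (a real build gets these from the variant headers):

    #include <stdio.h>

    /* Invented configuration: entries 0, 1 and 7 are configured, and
     * entry 7 is an io-port rather than a real coprocessor. */
    #define XCHAL_CP_MASK       0x83
    #define XCHAL_CP_PORT_MASK  0x80

    #define XTENSA_HAVE_COPROCESSOR(x) \
        ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
    #define XTENSA_HAVE_IO_PORT(x) \
        (XCHAL_CP_PORT_MASK & (1 << (x)))

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("cp%d: coprocessor=%d io-port=%d\n", i,
                   !!XTENSA_HAVE_COPROCESSOR(i), !!XTENSA_HAVE_IO_PORT(i));
        return 0;
    }
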
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
new file mode 100644
index 000000000..6f02f6f21
--- /dev/null
+++ b/arch/xtensa/include/asm/core.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_CORE_H
+#define _ASM_XTENSA_CORE_H
+
+#include <variant/core.h>
+
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#ifndef XCHAL_HAVE_MPU
+#define XCHAL_HAVE_MPU 0
+#endif
+
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#ifndef XCHAL_HAVE_TRAX
+#define XCHAL_HAVE_TRAX 0
+#endif
+
+#ifndef XCHAL_NUM_PERF_COUNTERS
+#define XCHAL_NUM_PERF_COUNTERS 0
+#endif
+
+#if XCHAL_HAVE_WINDOWED
+#if defined(CONFIG_USER_ABI_DEFAULT) || defined(CONFIG_USER_ABI_CALL0_PROBE)
+/* Whether windowed ABI is supported in userspace. */
+#define USER_SUPPORT_WINDOWED
+#endif
+#if defined(__XTENSA_WINDOWED_ABI__) || defined(USER_SUPPORT_WINDOWED)
+/* Whether windowed ABI is supported either in userspace or in the kernel. */
+#define SUPPORT_WINDOWED
+#endif
+#endif
+
+/* Xtensa ABI requires stack alignment to be at least 16 */
+#if XCHAL_DATA_WIDTH > 16
+#define XTENSA_STACK_ALIGNMENT XCHAL_DATA_WIDTH
+#else
+#define XTENSA_STACK_ALIGNMENT 16
+#endif
+
+#ifndef XCHAL_HW_MIN_VERSION
+#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
+#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
+ XCHAL_HW_MIN_VERSION_MINOR)
+#else
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+#endif
+
+#endif
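
A quick check of the two derived values above, using hypothetical variant settings: an RC-2009.0 minimum hardware version (matching the 230000 constant used by initialize_mmu.h later in this series) and an 8-byte data width, which still falls back to the 16-byte ABI minimum for stack alignment.

    #include <stdio.h>

    /* Hypothetical variant/core.h settings, for illustration only. */
    #define XCHAL_HW_MIN_VERSION_MAJOR 2300   /* RC-2009.0 */
    #define XCHAL_HW_MIN_VERSION_MINOR 0
    #define XCHAL_DATA_WIDTH 8                /* max load/store width, bytes */

    #define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
                                  XCHAL_HW_MIN_VERSION_MINOR)

    #if XCHAL_DATA_WIDTH > 16
    #define XTENSA_STACK_ALIGNMENT XCHAL_DATA_WIDTH
    #else
    #define XTENSA_STACK_ALIGNMENT 16
    #endif

    int main(void)
    {
        printf("hw min version:  %d\n", XCHAL_HW_MIN_VERSION);   /* 230000 */
        printf("stack alignment: %d\n", XTENSA_STACK_ALIGNMENT); /* 16 */
        return 0;
    }
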
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
new file mode 100644
index 000000000..08010dbf5
--- /dev/null
+++ b/arch/xtensa/include/asm/current.h
@@ -0,0 +1,40 @@
+/*
+ * include/asm-xtensa/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CURRENT_H
+#define _XTENSA_CURRENT_H
+
+#include <asm/thread_info.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+register unsigned long current_stack_pointer __asm__("a1");
+
+#else
+
+#define GET_CURRENT(reg,sp) \
+ GET_THREAD_INFO(reg,sp); \
+ l32i reg, reg, TI_TASK \
+
+#endif
+
+
+#endif /* _XTENSA_CURRENT_H */
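
For illustration, the indirection behind `current` can be modeled in plain C; the assembly GET_CURRENT macro performs the same two steps (find thread_info, then one l32i to fetch the task pointer). The types and static thread_info below are stand-ins, not the kernel's:

    #include <stdio.h>

    /* Stand-in types; the kernel's thread_info lives at the base of the
     * task's stack and is located from the stack pointer register a1. */
    struct task_struct { int pid; };
    struct thread_info { struct task_struct *task; };

    static struct task_struct init_task = { .pid = 1 };
    static struct thread_info init_thread = { .task = &init_task };

    static struct thread_info *current_thread_info(void)
    {
        return &init_thread;   /* real code derives this from a1 */
    }

    #define current (current_thread_info()->task)

    int main(void)
    {
        printf("current pid: %d\n", current->pid);
        return 0;
    }
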
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
new file mode 100644
index 000000000..24304b39a
--- /dev/null
+++ b/arch/xtensa/include/asm/delay.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-xtensa/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ */
+
+#ifndef _XTENSA_DELAY_H
+#define _XTENSA_DELAY_H
+
+#include <asm/timex.h>
+#include <asm/param.h>
+
+extern unsigned long loops_per_jiffy;
+
+static inline void __delay(unsigned long loops)
+{
+ if (__builtin_constant_p(loops) && loops < 2)
+ __asm__ __volatile__ ("nop");
+ else if (loops >= 2)
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "+r" (loops));
+}
+
+/* Undefined functions to get a compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);
+
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long start = get_ccount();
+ unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)get_ccount()) - start < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+ __bad_udelay();
+ else
+ __udelay(usec);
+}
+
+static inline void __ndelay(unsigned long nsec)
+{
+ /*
+ * Inner shift makes sure multiplication doesn't overflow
+ * for legitimate nsec values
+ */
+ unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+ __delay(cycles);
+}
+
+#define ndelay(n) ndelay(n)
+
+static inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+ __bad_ndelay();
+ else
+ __ndelay(nsec);
+}
+
+#endif
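
Both helpers above avoid 64-bit division by scaling with powers of two: __udelay() computes roughly usecs * ccount_freq / 2^20 in place of the exact / 10^6, and __ndelay() uses / 2^30 in place of / 10^9, so delays come out a few percent short of nominal. A worked example of the arithmetic at an assumed 50 MHz clock:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ccount_freq = 50000000;   /* hypothetical 50 MHz clock */

        /* __udelay(): cycles ~= usecs * freq / 2^20 (exact would be
         * / 10^6); the inner >>15 keeps the 32-bit multiply from
         * overflowing for usecs below __MAX_UDELAY. */
        uint32_t ucycles = (100 * (ccount_freq >> 15)) >> 5;

        /* __ndelay(): cycles ~= nsecs * freq / 2^30 (exact: / 10^9). */
        uint32_t ncycles = (100000 * (ccount_freq >> 15)) >> 15;

        printf("udelay(100)    -> %u cycles (exact 5000)\n", ucycles);
        printf("ndelay(100000) -> %u cycles (exact 5000)\n", ncycles);
        return 0;
    }
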
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
new file mode 100644
index 000000000..172644539
--- /dev/null
+++ b/arch/xtensa/include/asm/dma.h
@@ -0,0 +1,55 @@
+/*
+ * include/asm-xtensa/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_H
+#define _XTENSA_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+/*
+ * This is only to be defined if we have PC-like DMA.
+ * By default this is not true on an Xtensa processor,
+ * however on boards with a PCI bus, such functionality
+ * might be emulated externally.
+ *
+ * NOTE: there still exists driver code that assumes
+ * this is defined, e.g. drivers/sound/soundcard.c (as of 2.4).
+ */
+#define MAX_DMA_CHANNELS 8
+
+/*
+ * The maximum virtual address to which DMA transfers
+ * can be performed on this platform.
+ *
+ * NOTE: This is board (platform) specific, not processor-specific!
+ *
+ * NOTE: This assumes DMA transfers can only be performed on
+ * the section of physical memory contiguously mapped in virtual
+ * space for the kernel. For the Xtensa architecture, this
+ * means the maximum possible size of this DMA area is
+ * the size of the statically mapped kernel segment
+ * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), i.e. 128 MB.
+ *
+ * NOTE: When the entire KSEG area is DMA capable, we subtract
+ * one from the max address so that the virt_to_phys() macro
+ * works correctly on the address (otherwise the address
+ * enters another area, and virt_to_phys() may not return
+ * the value desired).
+ */
+
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
+#endif
+
+/* Reserve and release a DMA channel */
+extern int request_dma(unsigned int dmanr, const char * device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
new file mode 100644
index 000000000..ffcf1ada1
--- /dev/null
+++ b/arch/xtensa/include/asm/elf.h
@@ -0,0 +1,196 @@
+/*
+ * include/asm-xtensa/elf.h
+ *
+ * ELF register definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ELF_H
+#define _XTENSA_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/coprocessor.h>
+#include <linux/elf-em.h>
+
+/* Xtensa processor ELF architecture-magic number */
+
+#define EM_XTENSA_OLD 0xABC7
+
+/* Xtensa relocations defined by the ABIs */
+
+#define R_XTENSA_NONE 0
+#define R_XTENSA_32 1
+#define R_XTENSA_RTLD 2
+#define R_XTENSA_GLOB_DAT 3
+#define R_XTENSA_JMP_SLOT 4
+#define R_XTENSA_RELATIVE 5
+#define R_XTENSA_PLT 6
+#define R_XTENSA_OP0 8
+#define R_XTENSA_OP1 9
+#define R_XTENSA_OP2 10
+#define R_XTENSA_ASM_EXPAND 11
+#define R_XTENSA_ASM_SIMPLIFY 12
+#define R_XTENSA_GNU_VTINHERIT 15
+#define R_XTENSA_GNU_VTENTRY 16
+#define R_XTENSA_DIFF8 17
+#define R_XTENSA_DIFF16 18
+#define R_XTENSA_DIFF32 19
+#define R_XTENSA_SLOT0_OP 20
+#define R_XTENSA_SLOT1_OP 21
+#define R_XTENSA_SLOT2_OP 22
+#define R_XTENSA_SLOT3_OP 23
+#define R_XTENSA_SLOT4_OP 24
+#define R_XTENSA_SLOT5_OP 25
+#define R_XTENSA_SLOT6_OP 26
+#define R_XTENSA_SLOT7_OP 27
+#define R_XTENSA_SLOT8_OP 28
+#define R_XTENSA_SLOT9_OP 29
+#define R_XTENSA_SLOT10_OP 30
+#define R_XTENSA_SLOT11_OP 31
+#define R_XTENSA_SLOT12_OP 32
+#define R_XTENSA_SLOT13_OP 33
+#define R_XTENSA_SLOT14_OP 34
+#define R_XTENSA_SLOT0_ALT 35
+#define R_XTENSA_SLOT1_ALT 36
+#define R_XTENSA_SLOT2_ALT 37
+#define R_XTENSA_SLOT3_ALT 38
+#define R_XTENSA_SLOT4_ALT 39
+#define R_XTENSA_SLOT5_ALT 40
+#define R_XTENSA_SLOT6_ALT 41
+#define R_XTENSA_SLOT7_ALT 42
+#define R_XTENSA_SLOT8_ALT 43
+#define R_XTENSA_SLOT9_ALT 44
+#define R_XTENSA_SLOT10_ALT 45
+#define R_XTENSA_SLOT11_ALT 46
+#define R_XTENSA_SLOT12_ALT 47
+#define R_XTENSA_SLOT13_ALT 48
+#define R_XTENSA_SLOT14_ALT 49
+
+/* ELF register definitions. This is needed for core dump support. */
+
+typedef unsigned long elf_greg_t;
+
+typedef struct user_pt_regs xtensa_gregset_t;
+
+#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 18
+
+typedef unsigned int elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
+ ( (x)->e_machine == EM_XTENSA_OLD ) )
+
+#define ELFOSABI_XTENSA_FDPIC 65
+#define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC)
+#define ELF_FDPIC_CORE_EFLAGS 0
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+
+#ifdef __XTENSA_EL__
+# define ELF_DATA ELFDATA2LSB
+#elif defined(__XTENSA_EB__)
+# define ELF_DATA ELFDATA2MSB
+#else
+# error processor byte order undefined!
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+#define CORE_DUMP_USE_REGSET
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ * For the moment, we have only optimizations for the Intel generations,
+ * but that could change...
+ */
+
+#define ELF_PLATFORM (NULL)
+
+/*
+ * The Xtensa processor ABI says that when the program starts, a2
+ * contains a pointer to a function which might be registered using
+ * `atexit'. This provides a means for the dynamic linker to call
+ * DT_FINI functions for shared libraries that have been loaded before
+ * the code runs.
+ *
+ * A value of 0 means we have no such handler.
+ *
+ * We might as well make sure everything else is cleared too (except
+ * for the stack pointer in a1), just to make things more
+ * deterministic. Also, clearing a0 terminates debugger backtraces.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { \
+ (_r)->areg[0] = 0; /*(_r)->areg[1] = 0;*/ \
+ (_r)->areg[2] = 0; (_r)->areg[3] = 0; \
+ (_r)->areg[4] = 0; (_r)->areg[5] = 0; \
+ (_r)->areg[6] = 0; (_r)->areg[7] = 0; \
+ (_r)->areg[8] = 0; (_r)->areg[9] = 0; \
+ (_r)->areg[10] = 0; (_r)->areg[11] = 0; \
+ (_r)->areg[12] = 0; (_r)->areg[13] = 0; \
+ (_r)->areg[14] = 0; (_r)->areg[15] = 0; \
+ } while (0)
+
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->areg[4] = _exec_map_addr; \
+ (_r)->areg[5] = _interp_map_addr; \
+ (_r)->areg[6] = dynamic_addr; \
+ } while (0)
+
+typedef struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+#endif
+} elf_xtregs_t;
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
+
+#endif /* _XTENSA_ELF_H */
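
The elf_check_arch() test accepts both the officially assigned EM_XTENSA machine number and the 0xABC7 value used before registration. The same check as a user-space sketch against glibc's <elf.h> (which defines Elf32_Ehdr and EM_XTENSA):

    #include <elf.h>
    #include <stdio.h>

    #define EM_XTENSA_OLD 0xABC7

    /* Same shape as the kernel's elf_check_arch(). */
    static int xtensa_check_arch(const Elf32_Ehdr *x)
    {
        return x->e_machine == EM_XTENSA || x->e_machine == EM_XTENSA_OLD;
    }

    int main(void)
    {
        Elf32_Ehdr hdr = { .e_machine = EM_XTENSA };
        printf("EM_XTENSA accepted: %d\n", xtensa_check_arch(&hdr));
        hdr.e_machine = EM_386;
        printf("EM_386 accepted:    %d\n", xtensa_check_arch(&hdr));
        return 0;
    }
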
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644
index 000000000..1c65dc1d3
--- /dev/null
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -0,0 +1,38 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <linux/pgtable.h>
+#include <asm/kmap_size.h>
+
+/* The map slots for temporary mappings via kmap_atomic/local(). */
+enum fixed_addresses {
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN +
+ (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+/* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
+#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
+#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+#endif /* CONFIG_HIGHMEM */
+#endif
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
new file mode 100644
index 000000000..ed5870c77
--- /dev/null
+++ b/arch/xtensa/include/asm/flat.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_XTENSA_FLAT_H
+#define __ASM_XTENSA_FLAT_H
+
+#include <asm/unaligned.h>
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
+
+#endif /* __ASM_XTENSA_FLAT_H */
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644
index 000000000..0ea4f84cd
--- /dev/null
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -0,0 +1,31 @@
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+extern unsigned long return_address(unsigned level);
+#define ftrace_return_address(n) return_address(n)
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 3
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
new file mode 100644
index 000000000..a6f7d7ab5
--- /dev/null
+++ b/arch/xtensa/include/asm/futex.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Atomic futex routines
+ *
+ * Based on the PowerPC implementation
+ *
+ * Copyright (C) 2013 TangoTec Ltd.
+ *
+ * Baruch Siach <baruch@tkos.co.il>
+ */
+
+#ifndef _ASM_XTENSA_FUTEX_H
+#define _ASM_XTENSA_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
+#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
+#include <asm-generic/futex.h>
+
+#if XCHAL_HAVE_EXCLUSIVE
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+ __asm__ __volatile( \
+ "1: l32ex %[oldval], %[addr]\n" \
+ insn "\n" \
+ "2: s32ex %[newval], %[addr]\n" \
+ " getex %[newval]\n" \
+ " beqz %[newval], 1b\n" \
+ " movi %[newval], 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ " .literal_position\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
+ " .previous\n" \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret) \
+ : [addr] "r" (uaddr), [oparg] "r" (arg), \
+ [fault] "I" (-EFAULT) \
+ : "memory")
+#elif XCHAL_HAVE_S32C1I
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+ __asm__ __volatile( \
+ "1: l32i %[oldval], %[mem]\n" \
+ insn "\n" \
+ " wsr %[oldval], scompare1\n" \
+ "2: s32c1i %[newval], %[mem]\n" \
+ " bne %[newval], %[oldval], 1b\n" \
+ " movi %[newval], 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ " .literal_position\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
+ " .previous\n" \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+ [mem] "+m" (*(uaddr)) \
+ : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
+ : "memory")
+#endif
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+ int oldval = 0, ret;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %[newval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+#else
+ return futex_atomic_op_inuser_local(op, oparg, oval, uaddr);
+#endif
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp;
+ int ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ " # futex_atomic_cmpxchg_inatomic\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ "1: l32ex %[tmp], %[addr]\n"
+ " s32i %[tmp], %[uval], 0\n"
+ " bne %[tmp], %[oldval], 2f\n"
+ " mov %[tmp], %[newval]\n"
+ "3: s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+#elif XCHAL_HAVE_S32C1I
+ " wsr %[oldval], scompare1\n"
+ "1: s32c1i %[newval], %[addr], 0\n"
+ " s32i %[newval], %[uval], 0\n"
+#endif
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 4\n"
+ " .literal_position\n"
+ "4: movi %[tmp], 2b\n"
+ " movi %[ret], %[fault]\n"
+ " jx %[tmp]\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b, 4b\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ " .long 3b, 4b\n"
+#endif
+ " .previous\n"
+ : [ret] "+r" (ret), [newval] "+r" (newval), [tmp] "=&r" (tmp)
+ : [addr] "r" (uaddr), [oldval] "r" (oldval), [uval] "r" (uval),
+ [fault] "I" (-EFAULT)
+ : "memory");
+
+ return ret;
+#else
+ return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
+#endif
+}
+
+#endif /* _ASM_XTENSA_FUTEX_H */
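
Both assembly variants of __futex_atomic_op implement one contract: fetch the old value, apply the operation, store atomically, and return the old value through *oval. Below is a plain-C, deliberately non-atomic model of just that contract (the l32ex/s32ex and s32c1i loops are what make the real thing safe against concurrent updates); note the kernel implements ANDN by passing ~oparg to an and instruction, which the model folds into one expression:

    #include <stdint.h>
    #include <stdio.h>

    enum { FUTEX_OP_SET, FUTEX_OP_ADD, FUTEX_OP_OR, FUTEX_OP_ANDN,
           FUTEX_OP_XOR };

    /* Non-atomic model of the contract only; the hardware loops above
     * are what make the real operation atomic. */
    static int futex_atomic_op(int op, uint32_t oparg, uint32_t *oval,
                               uint32_t *uaddr)
    {
        uint32_t old = *uaddr;

        switch (op) {
        case FUTEX_OP_SET:  *uaddr = oparg;        break;
        case FUTEX_OP_ADD:  *uaddr = old + oparg;  break;
        case FUTEX_OP_OR:   *uaddr = old | oparg;  break;
        case FUTEX_OP_ANDN: *uaddr = old & ~oparg; break;
        case FUTEX_OP_XOR:  *uaddr = old ^ oparg;  break;
        default:            return -1;             /* -ENOSYS upstream */
        }
        *oval = old;
        return 0;
    }

    int main(void)
    {
        uint32_t word = 0xf0, old;

        futex_atomic_op(FUTEX_OP_ANDN, 0x30, &old, &word);
        printf("old=%#x new=%#x\n", old, word);   /* old=0xf0 new=0xc0 */
        return 0;
    }
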
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
new file mode 100644
index 000000000..34b8b620e
--- /dev/null
+++ b/arch/xtensa/include/asm/highmem.h
@@ -0,0 +1,84 @@
+/*
+ * include/asm-xtensa/highmem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_HIGHMEM_H
+#define _XTENSA_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+#include <linux/wait.h>
+#include <linux/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+
+#define PKMAP_BASE ((FIXADDR_START - \
+ (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
+#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL_EXEC
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+#define get_pkmap_color get_pkmap_color
+static inline int get_pkmap_color(struct page *page)
+{
+ return DCACHE_ALIAS(page_to_phys(page));
+}
+
+extern unsigned int last_pkmap_nr_arr[];
+
+static inline unsigned int get_next_pkmap_nr(unsigned int color)
+{
+ last_pkmap_nr_arr[color] =
+ (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
+ return last_pkmap_nr_arr[color] + color;
+}
+
+static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
+{
+ return pkmap_nr < DCACHE_N_COLORS;
+}
+
+static inline int get_pkmap_entries_count(unsigned int color)
+{
+ return LAST_PKMAP / DCACHE_N_COLORS;
+}
+
+extern wait_queue_head_t pkmap_map_wait_arr[];
+
+static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+{
+ return pkmap_map_wait_arr + color;
+}
+
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
+#define arch_kmap_local_map_idx kmap_local_map_idx
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
+#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
+
+#endif
+
+extern pte_t *pkmap_page_table;
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
+void kmap_init(void);
+
+#endif /* CONFIG_HIGHMEM */
+#endif
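
Because each pkmap entry must preserve the cache color of the page it maps, the allocator above keeps one cursor per color and advances it in strides of DCACHE_N_COLORS, so an entry number is always congruent to its color. A small demonstration with an assumed 4-color, 16-entry geometry:

    #include <stdio.h>

    /* Assumed geometry: 4 cache colors, 16 pkmap entries in total. */
    #define DCACHE_N_COLORS 4
    #define LAST_PKMAP      16
    #define LAST_PKMAP_MASK (LAST_PKMAP - 1)

    static unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];

    /* As in get_next_pkmap_nr(): the per-color cursor strides by
     * DCACHE_N_COLORS, so (entry % DCACHE_N_COLORS) == color holds
     * for every entry this function ever returns. */
    static unsigned int get_next_pkmap_nr(unsigned int color)
    {
        last_pkmap_nr_arr[color] =
            (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
        return last_pkmap_nr_arr[color] + color;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("color 1 -> entry %u\n", get_next_pkmap_nr(1));
        return 0;   /* prints 5, 9, 13, 1, 5: always entry % 4 == 1 */
    }
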
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..9ec86f440
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -0,0 +1,62 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef __ASM_XTENSA_HW_BREAKPOINT_H
+#define __ASM_XTENSA_HW_BREAKPOINT_H
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <uapi/linux/hw_breakpoint.h>
+
+/* Breakpoint */
+#define XTENSA_BREAKPOINT_EXECUTE 0
+
+/* Watchpoints */
+#define XTENSA_BREAKPOINT_LOAD 1
+#define XTENSA_BREAKPOINT_STORE 2
+
+struct arch_hw_breakpoint {
+ unsigned long address;
+ u16 len;
+ u16 type;
+};
+
+struct perf_event_attr;
+struct perf_event;
+struct pt_regs;
+struct task_struct;
+
+int hw_breakpoint_slots(int type);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int check_hw_breakpoint(struct pt_regs *regs);
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+void restore_dbreak(void);
+
+#else
+
+struct task_struct;
+
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __ASM_XTENSA_HW_BREAKPOINT_H */
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000..574795a20
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,245 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMUs.
+ *
+ * For the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <asm/vectors.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
+ * For details see Documentation/arch/xtensa/atomctl.rst
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+ movi a3, 0x29 /* non-MX -- Most cores use Std Memory
+				 * Controllers which usually can't use RCW
+ */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1:
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
+
+ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+ * and jump to the new mapping.
+ */
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -XCHAL_SPANNING_WAY
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the requested static mappings. */
+
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using final mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
+ .endm
+
+ .macro initialize_cacheattr
+
+#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
+#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
+#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
+#endif
+
+#if XCHAL_HAVE_MPU
+ __REFCONST
+ .align 4
+.Lattribute_table:
+ .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
+ .long 0x006600, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .previous
+
+ movi a3, .Lattribute_table
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a5, 1
+ movi a6, XCHAL_MPU_ENTRIES
+ movi a10, 0x20000000
+ movi a11, -1
+1:
+ sub a5, a5, a10
+ extui a8, a4, 28, 4
+ beq a8, a11, 2f
+ addi a6, a6, -1
+ mov a11, a8
+2:
+ addx4 a9, a8, a3
+ l32i a9, a9, 0
+ or a9, a9, a6
+ wptlb a9, a5
+ slli a4, a4, 4
+ bgeu a5, a10, 1b
+
+#else
+ movi a5, XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a8, 0x20000000
+1:
+ rdtlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ wdtlb a3, a5
+ ritlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ witlb a3, a5
+
+ add a5, a5, a8
+ srli a4, a4, 4
+ bgeu a5, a8, 1b
+
+ isync
+#endif
+#endif
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
new file mode 100644
index 000000000..934e58399
--- /dev/null
+++ b/arch/xtensa/include/asm/io.h
@@ -0,0 +1,62 @@
+/*
+ * include/asm-xtensa/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IO_H
+#define _XTENSA_IO_H
+
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/vectors.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#include <linux/types.h>
+
+#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
+#define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
+
+#ifdef CONFIG_MMU
+/*
+ * I/O memory mapping functions.
+ */
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
+ else
+ return ioremap_prot(offset, size,
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+}
+#define ioremap ioremap
+
+static inline void __iomem *ioremap_cache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
+ else
+ return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
+
+}
+#define ioremap_cache ioremap_cache
+#endif /* CONFIG_MMU */
+
+#include <asm-generic/io.h>
+
+#endif /* _XTENSA_IO_H */
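
ioremap() and ioremap_cache() above short-circuit physical addresses that fall inside the statically mapped KIO window: no page tables are touched, the address is simply rebased onto the fixed virtual window. The range test in isolation, assuming the default MMUv3 window from kmem_layout.h (the helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Default MMUv3 KIO window, per kmem_layout.h in this series. */
    #define XCHAL_KIO_PADDR        0xf0000000ul
    #define XCHAL_KIO_SIZE         0x10000000ul
    #define XCHAL_KIO_CACHED_VADDR 0xe0000000ul

    /* Illustrative helper mirroring the in-window branch of
     * ioremap_cache(): rebase the physical address onto the fixed
     * virtual window, no page tables required. */
    static uintptr_t kio_remap_cached(uintptr_t paddr)
    {
        if (paddr >= XCHAL_KIO_PADDR &&
            paddr - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
            return paddr - XCHAL_KIO_PADDR + XCHAL_KIO_CACHED_VADDR;
        return 0;   /* the kernel falls back to ioremap_prot() */
    }

    int main(void)
    {
        printf("%#lx\n", (unsigned long)kio_remap_cached(0xf0001000));
        printf("%#lx\n", (unsigned long)kio_remap_cached(0x90000000));
        return 0;   /* prints 0xe0001000, then 0 */
    }
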
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
new file mode 100644
index 000000000..0f71a51da
--- /dev/null
+++ b/arch/xtensa/include/asm/irq.h
@@ -0,0 +1,42 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <linux/init.h>
+#include <asm/core.h>
+
+#ifdef CONFIG_PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
+#else
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return (irq);
+}
+
+struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);
+
+#endif /* _XTENSA_IRQ_H */
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
new file mode 100644
index 000000000..128906815
--- /dev/null
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -0,0 +1,83 @@
+/*
+ * Xtensa IRQ flags handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_IRQFLAGS_H
+#define _XTENSA_IRQFLAGS_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("rsr %0, ps" : "=a" (flags));
+ return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_MISC) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+ : "=a" (flags) :: "memory");
+#endif
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+ asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("wsr %0, ps; rsync"
+ :: "a" (flags) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _XTENSA_IRQFLAGS_H */
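
arch_irqs_disabled_flags() treats interrupts as masked when PS.INTLEVEL has been raised to at least LOCKLEVEL or when the exception-mode bit PS.EXCM is set; the #error above guarantees that the EXCM bit alone is numerically large enough to pass the comparison. The predicate in isolation, with typical field values assumed (real ones come from asm/regs.h and the platform's LOCKLEVEL):

    #include <stdio.h>
    #include <stdbool.h>

    /* Typical values; the real ones come from asm/regs.h and the
     * platform's LOCKLEVEL. */
    #define PS_INTLEVEL_MASK 0x0000000f
    #define PS_EXCM_BIT      4
    #define LOCKLEVEL        1

    static bool irqs_disabled_flags(unsigned long flags)
    {
        return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
    }

    int main(void)
    {
        printf("intlevel=0 excm=0: %d\n", irqs_disabled_flags(0));
        printf("intlevel=1 excm=0: %d\n", irqs_disabled_flags(1));
        printf("intlevel=0 excm=1: %d\n",
               irqs_disabled_flags(1ul << PS_EXCM_BIT));
        return 0;
    }
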
diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
new file mode 100644
index 000000000..c812bf850
--- /dev/null
+++ b/arch/xtensa/include/asm/jump_label.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_JUMP_LABEL_H
+#define _ASM_XTENSA_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 3
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "_nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ /*
+	 * The Xtensa assembler will mark certain points in the code
+	 * as unreachable, so that later assembler or linker relaxation
+	 * passes can use them. A spot right after the J instruction
+ * is one such point. Assembler and/or linker may insert padding
+ * or literals here, breaking code flow in case the J instruction
+ * is later replaced with NOP. Put a label right after the J to
+ * make it reachable and wrap both into a no-transform block
+ * to avoid any assembler interference with this.
+ */
+ asm_volatile_goto("1:\n\t"
+ ".begin no-transform\n\t"
+ "_j %l[l_yes]\n\t"
+ "2:\n\t"
+ ".end no-transform\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000..216b6f32c
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000..6fc05cba6
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,104 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/core.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#endif
+
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
+#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000..0ba997323
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
new file mode 100644
index 000000000..71afe418d
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_H
+#define _XTENSA_MMU_H
+
+#ifndef CONFIG_MMU
+#include <asm-generic/mmu.h>
+#else
+
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ unsigned int cpu;
+} mm_context_t;
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
new file mode 100644
index 000000000..e337ba968
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -0,0 +1,154 @@
+/*
+ * Switch an MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_CONTEXT_H
+#define _XTENSA_MMU_CONTEXT_H
+
+#ifndef CONFIG_MMU
+#include <asm/nommu_context.h>
+#else
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+
+#include <asm/vectors.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
+
+#if (XCHAL_HAVE_TLBS != 1)
+# error "Linux must have an MMU!"
+#endif
+
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * NO_CONTEXT is the invalid ASID value that we don't ever assign to
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
+ *
+ * 0 invalid
+ * 1 kernel
+ * 2 reserved
+ * 3 reserved
+ * 4...255 available
+ */
+
+#define NO_CONTEXT 0
+#define ASID_USER_FIRST 4
+#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
+#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
+
+void init_mmu(void);
+void init_kio(void);
+
+static inline void set_rasid_register (unsigned long val)
+{
+ __asm__ __volatile__ (" wsr %0, rasid\n\t"
+ " isync\n" : : "a" (val));
+}
+
+static inline unsigned long get_rasid_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long asid = cpu_asid_cache(cpu);
+ if ((++asid & ASID_MASK) == 0) {
+ /*
+ * Start new asid cycle; continue counting with next
+ * incarnation bits; skipping over 0, 1, 2, 3.
+ */
+ local_flush_tlb_all();
+ asid += ASID_USER_FIRST;
+ }
+ cpu_asid_cache(cpu) = asid;
+ mm->context.asid[cpu] = asid;
+ mm->context.cpu = cpu;
+}
+
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ /*
+ * Check if our ASID is of an older version and thus invalid.
+ */
+
+ if (mm) {
+ unsigned long asid = mm->context.asid[cpu];
+
+ if (asid == NO_CONTEXT ||
+ ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+ get_new_mmu_context(mm, cpu);
+ }
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+ get_mmu_context(mm, cpu);
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ invalidate_page_directory();
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
+ */
+
+#define init_new_context init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ }
+ mm->context.cpu = -1;
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ int migrated = next->context.cpu != cpu;
+ /* Flush the icache if we migrated to a new core. */
+ if (migrated) {
+ __invalidate_icache_all();
+ next->context.cpu = cpu;
+ }
+ if (migrated || prev != next)
+ activate_context(next, cpu);
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+#define destroy_context destroy_context
+static inline void destroy_context(struct mm_struct *mm)
+{
+ invalidate_page_directory();
+}
+
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_CONTEXT_H */
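
The allocator above never scans mm structs on rollover: the low XCHAL_MMU_ASID_BITS of the per-CPU counter are the hardware ASID and the high bits act as a generation stamp, so get_mmu_context() detects a stale mm by XORing against the cache. A compact single-CPU model of the allocation step and the RASID packing (the TLB flush is only noted in a comment):

    #include <stdio.h>

    #define XCHAL_MMU_ASID_BITS 8   /* assumed; comes from the variant */
    #define ASID_USER_FIRST 4
    #define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
    #define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))

    static unsigned long asid_cache = ASID_USER_FIRST;

    /* Model of get_new_mmu_context(): bump the counter; on 8-bit wrap
     * a real kernel flushes the TLB and skips reserved ASIDs 0..3. */
    static unsigned long new_asid(void)
    {
        unsigned long asid = asid_cache;

        if ((++asid & ASID_MASK) == 0)
            asid += ASID_USER_FIRST;   /* local_flush_tlb_all() here */
        return asid_cache = asid;
    }

    int main(void)
    {
        unsigned long a = new_asid();

        /* Ring 0 keeps kernel ASID 1, rings 2/3 the reserved 2 and 3;
         * the user ASID lands in bits 8..15 of RASID. */
        printf("asid %#lx -> RASID %#010lx\n", a, ASID_INSERT(a));
        return 0;
    }
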
diff --git a/arch/xtensa/include/asm/mtd-xip.h b/arch/xtensa/include/asm/mtd-xip.h
new file mode 100644
index 000000000..514325155
--- /dev/null
+++ b/arch/xtensa/include/asm/mtd-xip.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_MTD_XIP_H
+#define _ASM_MTD_XIP_H
+
+#include <asm/processor.h>
+
+#define xip_irqpending() (xtensa_get_sr(interrupt) & xtensa_get_sr(intenable))
+#define xip_currtime() (xtensa_get_sr(ccount))
+#define xip_elapsed_since(x) ((xtensa_get_sr(ccount) - (x)) / 1000) /* should work up to 1GHz */
+#define xip_cpu_idle() do { asm volatile ("waiti 0"); } while (0)
+
+#endif /* _ASM_MTD_XIP_H */
+
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h
new file mode 100644
index 000000000..73dcc5456
--- /dev/null
+++ b/arch/xtensa/include/asm/mxregs.h
@@ -0,0 +1,46 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MXREGS_H
+#define _XTENSA_MXREGS_H
+
+/*
+ * RER/WER at, as Read/write external register
+ * at: value
+ * as: address
+ *
+ * Address Value
+ * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
+ * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
+ * 0180 0...0m..m Clear enable specified by mask (m)
+ * 0184 0...0m..m Set enable specified by mask (m)
+ * 0190 0...0x..x 8-bit IPI partition register
+ * VVVVVVVVPPPPUUUUUUUUUUUUUUUUU
+ * V (10-bit) Release/Version
+ * P ( 4-bit) Number of cores - 1
+ * U (18-bit) ID
+ * 01a0 i.......i 32-bit ConfigID
+ * 0200 0...0m..m RunStall core 'n'
+ * 0220 c Cache coherency enabled
+ */
+
+#define MIROUT(irq) (0x000 + (irq))
+#define MIPICAUSE(cpu) (0x100 + (cpu))
+#define MIPISET(cause) (0x140 + (cause))
+#define MIENG 0x180
+#define MIENGSET 0x184
+#define MIASG 0x188 /* Read Global Assert Register */
+#define MIASGSET	0x18c	/* Set Global Assert Register */
+#define MIPIPART 0x190
+#define SYSCFGID 0x1a0
+#define MPSCORE 0x200
+#define CCON 0x220
+
+#endif /* _XTENSA_MXREGS_H */
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
new file mode 100644
index 000000000..7c9d1918d
--- /dev/null
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+static inline void init_mmu(void)
+{
+}
+
+static inline void init_kio(void)
+{
+}
+
+#include <asm-generic/nommu_context.h>
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
new file mode 100644
index 000000000..a77d04972
--- /dev/null
+++ b/arch/xtensa/include/asm/page.h
@@ -0,0 +1,203 @@
+/*
+ * include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#include <linux/const.h>
+
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/kmem_layout.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
+#else
+#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
+#endif
+
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * | |cache| cache index
+ * | pfn |off| virtual address
+ * |xxxx:X|zzz|
+ * | : | |
+ * | \ / | |
+ * |trans.| |
+ * | / \ | |
+ * |yyyy:Y|zzz| physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER 0
+# define DCACHE_ALIAS(a) ((void)(a), 0)
+#endif
+#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER 0
+#endif
+
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ * Use the 'nsau' instruction if the processor supports it, or the generic version otherwise.
+ */
+
+#if XCHAL_HAVE_NSA
+
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
+ return 32 - lz;
+}
+
+#else
+
+# include <asm-generic/getorder.h>
+
+#endif
+
+struct page;
+struct vm_area_struct;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_page_alias(void *vaddr, unsigned long paddr);
+extern void copy_page_alias(void *to, void *from,
+ unsigned long to_paddr, unsigned long from_paddr);
+
+#define clear_user_highpage clear_user_highpage
+void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#else
+# define clear_user_page(page, vaddr, pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel address, not user
+ * addresses.
+ */
+
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+#ifndef CONFIG_XIP_KERNEL
+ return off + PHYS_OFFSET;
+#else
+ if (off < XCHAL_KSEG_SIZE)
+ return off + PHYS_OFFSET;
+
+ off -= XCHAL_KSEG_SIZE;
+ if (off >= XCHAL_KIO_SIZE)
+ off -= XCHAL_KIO_SIZE;
+
+ return off + XCHAL_KIO_PADDR;
+#endif
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
new file mode 100644
index 000000000..e320aa5bb
--- /dev/null
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -0,0 +1,76 @@
+/*
+ * include/asm-xtensa/pci-bridge.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_BRIDGE_H
+#define _XTENSA_PCI_BRIDGE_H
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pciauto_bus_scan() enumerates the PCI space.
+ */
+
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+struct pci_space {
+ unsigned long start;
+ unsigned long end;
+ unsigned long base;
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+
+struct pci_controller {
+ int index; /* used for pci_controller_num */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+
+ /* Currently, we limit ourselves to 1 I/O range and 3 memory
+ * ranges, since the common pci_bus structure can't handle more.
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct pci_space io_space;
+ struct pci_space mem_space;
+
+ /* Return the interrupt number for a device. */
+ int (*map_irq)(struct pci_dev*, u8, u8);
+
+};
+
+static inline void pcibios_init_resource(struct resource *res,
+ unsigned long start, unsigned long end, int flags, char *name)
+{
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+ res->name = name;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+}
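+/*
+ * Typical use, sketched with made-up ranges: a platform's PCI setup
+ * code fills in the controller's windows before scanning the bus:
+ *
+ *   pcibios_init_resource(&ctrl->io_resource, 0x2000, 0xffff,
+ *                         IORESOURCE_IO, "PCI host bridge");
+ *   pcibios_init_resource(&ctrl->mem_resources[0], 0x10000000,
+ *                         0x1fffffff, IORESOURCE_MEM, "PCI host bridge");
+ *   ctrl->mem_resource_count = 1;
+ */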
+
+#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
new file mode 100644
index 000000000..b56de9635
--- /dev/null
+++ b/arch/xtensa/include/asm/pci.h
@@ -0,0 +1,46 @@
+/*
+ * linux/include/asm-xtensa/pci.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_H
+#define _XTENSA_PCI_H
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader.
+ */
+
+#define pcibios_assign_all_busses() 0
+
+/* Assume some values. (We should revise them, if necessary) */
+
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Dynamic DMA mapping stuff.
+ * Xtensa has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* The PCI address space equals the physical memory address space.
+ * The networking and block device layers use this boolean for bounce-buffer
+ * decisions.
+ */
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+
+#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h
new file mode 100644
index 000000000..5aa4590ac
--- /dev/null
+++ b/arch/xtensa/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_XTENSA_PERF_EVENT_H
+#define __ASM_XTENSA_PERF_EVENT_H
+
+#endif /* __ASM_XTENSA_PERF_EVENT_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
new file mode 100644
index 000000000..7fc0f9126
--- /dev/null
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-xtensa/pgalloc.h
+ *
+ * Copyright (C) 2001-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef CONFIG_MMU
+#include <linux/highmem.h>
+#include <linux/slab.h>
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
+/*
+ * Allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so it has no extra memory associated with it.
+ */
+
+#define pmd_populate_kernel(mm, pmdp, ptep) \
+ (pmd_val(*(pmdp)) = ((unsigned long)ptep))
+#define pmd_populate(mm, pmdp, page) \
+ (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
+}
+
+static inline void ptes_clear(pte_t *ptep)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ pte_clear(NULL, 0, ptep + i);
+}
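+/*
+ * A zero-filled page would not be enough here: the Xtensa 'none' PTE is
+ * non-zero (pte_clear() in asm/pgtable.h writes _PAGE_CA_INVALID |
+ * _PAGE_USER), so the allocators below must rewrite every entry instead
+ * of relying on the __GFP_ZERO allocation alone.
+ */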
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ pte_t *ptep;
+
+ ptep = (pte_t *)__pte_alloc_one_kernel(mm);
+ if (!ptep)
+ return NULL;
+ ptes_clear(ptep);
+ return ptep;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ struct page *page;
+
+ page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+ if (!page)
+ return NULL;
+ ptes_clear(page_address(page));
+ return page;
+}
+
+#endif /* CONFIG_MMU */
+
+#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
new file mode 100644
index 000000000..9a7e5e57e
--- /dev/null
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-xtensa/pgtable.h
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGTABLE_H
+#define _XTENSA_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * We only use two ring levels, user and kernel space.
+ */
+
+#ifdef CONFIG_MMU
+#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
+#define KERNEL_RING 0 /* kernel ring level */
+
+/*
+ * The Xtensa architecture port of Linux has a two-level page table system,
+ * i.e. the logical three-level Linux page table layout is folded.
+ * Each task has the following memory page tables:
+ *
+ * PGD table (page directory), i.e. the 3rd-level page table:
+ * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
+ * (Architectures that don't have the PMD folded point to the PMD tables)
+ *
+ * The pointer to the PGD table for a given task can be retrieved from
+ * the task structure (struct task_struct*) t, e.g. current():
+ * (t->mm ? t->mm : t->active_mm)->pgd
+ *
+ * PMD tables (page middle-directory), i.e. 2nd-level page tables:
+ * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
+ *
+ * PTE tables (page table entry), i.e. 1st-level page tables:
+ * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
+ * invalid_pte_table for absent mappings.
+ *
+ * Each page is 4 kB in size; the empty_zero_page is a special, shared
+ * zero-filled page.
+ */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE_SHIFT 10
+#define PTRS_PER_PGD 1024
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
+
+#ifdef CONFIG_MMU
+/*
+ * Virtual memory area. We keep a distance from other memory regions to be
+ * on the safe side. We also use this area for cache aliasing.
+ */
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_START + 0x08000000)
+#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE ICACHE_WAY_SIZE
+#endif
+
+#else
+
+#define VMALLOC_START __XTENSA_UL_CONST(0)
+#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
+
+#endif
+
+/*
+ * For the Xtensa architecture, the PTE layout is as follows:
+ *
+ * 31------12 11 10-9 8-6 5-4 3-2 1-0
+ * +-----------------------------------------+
+ * | | Software | HARDWARE |
+ * | PPN | ADW | RI |Attribute|
+ * +-----------------------------------------+
+ * pte_none | MBZ | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | wx |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | e0 |
+ * +-----------------------------------------+
+ *
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | w1 |
+ * +-----------------------------------------+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
+ * +-----------------------------------------+
+ *
+ * Legend:
+ * PPN Physical Page Number
+ * ADW software: accessed (young) / dirty / writable
+ * RI ring (0=privileged, 1=user, 2 and 3 are unused)
+ * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
+ * (11 is invalid and used to mark pages that are not present)
+ * e exclusive marker in swap PTEs
+ * w page is writable (hw)
+ * x page is executable (hw)
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+ * any access (read, write, and execute).
+ * - 'multihit-exception' has the highest priority of all MMU exceptions,
+ * so the ring must be set to 'RING_USER' even for 'non-present' pages.
+ * - on older hardware, the executable flag was not supported and was
+ * used as a 'valid' flag, so it always needs to be set.
+ * - we need to keep track of certain flags in software (dirty and young);
+ * to do this, we use write exceptions and have a separate software w-flag.
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
+ */
+
+#define _PAGE_ATTRIB_MASK 0xf
+
+#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
+#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
+
+#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
+#define _PAGE_CA_WB (1<<2) /* write-back */
+#define _PAGE_CA_WT (2<<2) /* write-through */
+#define _PAGE_CA_MASK (3<<2)
+#define _PAGE_CA_INVALID (3<<2)
+
+/* We use invalid attribute values to distinguish special pte entries */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
+#define _PAGE_NONE 0x04
+#else
+#define _PAGE_HW_VALID 0x00
+#define _PAGE_NONE 0x0f
+#endif
+
+#define _PAGE_USER (1<<4) /* user access (ring=1) */
+
+/* Software */
+#define _PAGE_WRITABLE_BIT 6
+#define _PAGE_WRITABLE (1<<6) /* software: page writable */
+#define _PAGE_DIRTY (1<<7) /* software: page dirty */
+#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
+
+/* We borrow bit 1 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE (1<<1)
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
+#else
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+#endif
+
+#else /* no mmu */
+
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+# define PAGE_NONE __pgprot(0)
+# define PAGE_SHARED __pgprot(0)
+# define PAGE_COPY __pgprot(0)
+# define PAGE_READONLY __pgprot(0)
+# define PAGE_KERNEL __pgprot(0)
+
+#endif
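+/*
+ * Worked example of the encoding (post-T1050 layout, illustrative pfn):
+ * a shared, writable, not-yet-written user page at pfn 0x12345 is
+ *
+ *   pfn_pte(0x12345, PAGE_SHARED)
+ *     == (0x12345 << 12) | _PAGE_CA_WB | _PAGE_ACCESSED
+ *        | _PAGE_USER | _PAGE_WRITABLE
+ *     == 0x12345154
+ *
+ * Note that _PAGE_HW_WRITE is still clear: the first store traps, which
+ * lets the kernel mark the page dirty in software before granting the
+ * hardware write permission.
+ */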
+
+/*
+ * On certain configurations of the Xtensa MMU (e.g. the initial Linux
+ * config), the MMU can't enforce execute permission separately and treats
+ * it the same as read. Also, write permission may imply read permission.
+ * What follows is the closest we can get by reasonable means.
+ * See the protection_map[] array in linux/mm/mmap.c, which uses these
+ * definitions.
+ */
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern unsigned long empty_zero_page[1024];
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#ifdef CONFIG_MMU
+extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+extern void paging_init(void);
+#else
+# define swapper_pg_dir NULL
+static inline void paging_init(void) { }
+#endif
+
+/*
+ * The pmd contains the kernel virtual address of the pte page.
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
+
+/*
+ * pte status.
+ */
+# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
+#else
+# define pte_present(pte) \
+ (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
+ || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
+#endif
+#define pte_clear(mm,addr,ptep) \
+ do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
+
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte)
+ { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+ { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+ { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+ { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+
+#define pgprot_noncached(prot) \
+ ((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \
+ _PAGE_CA_BYPASS)))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define PFN_PTE_SHIFT PAGE_SHIFT
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void update_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+
+}
+
+struct mm_struct;
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ update_pte(ptep, pte);
+}
+
+static inline void
+set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+}
+
+struct vm_area_struct;
+
+static inline int
+ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ update_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
+}
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
+
+#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 11)
+#define __swp_entry(type,offs) \
+ ((swp_entry_t){(((type) & 0x1f) << 6) | ((offs) << 11) | \
+ _PAGE_CA_INVALID | _PAGE_USER})
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
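+/*
+ * Worked example: __swp_entry(2, 0x100) packs to
+ *
+ *   (2 << 6) | (0x100 << 11) | _PAGE_CA_INVALID | _PAGE_USER == 0x8009c
+ *
+ * which pte_present() rejects (invalid CA bits, attribute != _PAGE_NONE)
+ * and which round-trips as __swp_type() == 2, __swp_offset() == 0x100.
+ */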
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+#endif /* !defined (__ASSEMBLY__) */
+
+
+#ifdef __ASSEMBLY__
+
+/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
+ * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
+ * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
+ * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
+ *
+ * Note: We require an additional temporary register which can be the same as
+ * the register that holds the address.
+ *
+ * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
+ *
+ */
+#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
+#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
+
+#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
+ _PGD_INDEX(tmp, adr); \
+ addx4 mm, tmp, mm
+
+#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
+ srli pmd, pmd, PAGE_SHIFT; \
+ slli pmd, pmd, PAGE_SHIFT; \
+ addx4 pmd, tmp, pmd
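+/*
+ * C equivalents of the macros above (sketch; PGD and PTE entries are
+ * 4 bytes each, which is what the addx4 scaling relies on):
+ *
+ *   _PGD_OFFSET: mm->pgd + (adr >> PGDIR_SHIFT)
+ *   _PTE_OFFSET: (pte_t *)(pmd_val & PAGE_MASK)
+ *                    + ((adr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ *
+ * The srli/slli pair clears the low PAGE_SHIFT bits of the pmd value,
+ * i.e. it is the '& PAGE_MASK' above.
+ */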
+
+#else
+
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, address, ptep) \
+ update_mmu_cache_range(NULL, vma, address, ptep, 1)
+
+typedef pte_t *pte_addr_t;
+
+void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+
+#endif /* !defined (__ASSEMBLY__) */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
new file mode 100644
index 000000000..94f13fabf
--- /dev/null
+++ b/arch/xtensa/include/asm/platform.h
@@ -0,0 +1,46 @@
+/*
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/types.h>
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the MMU is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot-loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_calibrate_ccount calibrates the CPU clock frequency (CONFIG_XTENSA_CALIBRATE)
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
+#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
new file mode 100644
index 000000000..d008a153a
--- /dev/null
+++ b/arch/xtensa/include/asm/processor.h
@@ -0,0 +1,269 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#include <asm/core.h>
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+
+#include <asm/bootparam.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/regs.h>
+
+#define ARCH_SLAB_MINALIGN XTENSA_STACK_ALIGNMENT
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#ifdef CONFIG_MMU
+#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
+#else
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
+#endif
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism. To do this, an otherwise
+ * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG 63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
+#else
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
+#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+#define KERNEL_PS_WOE_MASK PS_WOE_MASK
+#elif defined(__XTENSA_CALL0_ABI__)
+#define KERNEL_PS_WOE_MASK 0
+#else
+#error Unsupported xtensa ABI
+#endif
+
+#ifndef __ASSEMBLY__
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+
+#elif defined(__XTENSA_CALL0_ABI__)
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra, ws) (ra)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1 GB range as the ra.
+ */
+#define MAKE_PC_FROM_RA(ra, sp) (ra)
+
+#else
+#error Unsupported Xtensa ABI
+#endif
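+/*
+ * Worked example for the windowed ABI (illustrative addresses): a call8
+ * (winsize 2) returning to pc 0xd0001234 gives
+ *
+ *   MAKE_RA_FOR_CALL(0xd0001234, 2) == 0x10001234 | (2 << 30)
+ *                                   == 0x90001234
+ *
+ * and with any sp in the same 1 GB range, e.g. 0xd0ffe000,
+ *
+ *   MAKE_PC_FROM_RA(0x90001234, 0xd0ffe000)
+ *     == 0x10001234 | 0xc0000000 == 0xd0001234
+ *
+ * recovering the original pc.
+ */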
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp. reg must be in the range [0..4).
+ */
+#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call8. reg must be in the range [4..8).
+ */
+#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call12. reg must be in the range [4..12).
+ */
+#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
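+/*
+ * For illustration: under the windowed ABI the 16-byte base save area
+ * directly below a frame's stack pointer holds a0..a3, so
+ *
+ *   SPILL_SLOT(sp, 0) == *((unsigned long *)(sp) - 4)
+ *
+ * reads the spilled a0 (the return address) at sp - 16.
+ */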
+
+struct thread_struct {
+
+ /* kernel's return address and stack pointer for context switching */
+ unsigned long ra; /* kernel's a0: return address and window call size */
+ unsigned long sp; /* kernel's a1: stack pointer */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
+ struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
+#endif
+ /* Make structure 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+};
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap() calls.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define INIT_THREAD \
+{ \
+ ra: 0, \
+ sp: sizeof(init_stack) + (long) &init_stack, \
+}
+
+
+/*
+ * Do the necessary setup to start up a newly executed thread.
+ * Note: When the windowed ABI is used for userspace, we set up PS
+ * as if we did a call4 to the new pc;
+ * set_thread_state in signal.c depends on it.
+ */
+#if IS_ENABLED(CONFIG_USER_ABI_CALL0)
+#define USER_PS_VALUE ((USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+#else
+#define USER_PS_VALUE (PS_WOE_MASK | \
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+#endif
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+ do { \
+ unsigned long syscall = (regs)->syscall; \
+ unsigned long current_aregs[16]; \
+ memcpy(current_aregs, (regs)->areg, sizeof(current_aregs)); \
+ memset((regs), 0, sizeof(*(regs))); \
+ (regs)->pc = (new_pc); \
+ (regs)->ps = USER_PS_VALUE; \
+ memcpy((regs)->areg, current_aregs, sizeof(current_aregs)); \
+ (regs)->areg[1] = (new_sp); \
+ (regs)->areg[0] = 0; \
+ (regs)->wmask = 1; \
+ (regs)->depc = 0; \
+ (regs)->windowbase = 0; \
+ (regs)->windowstart = 1; \
+ (regs)->syscall = syscall; \
+ } while (0)
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+extern unsigned long __get_wchan(struct task_struct *p);
+
+void init_arch(bp_tag_t *bp_start);
+void do_notify_resume(struct pt_regs *regs);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
+
+#define cpu_relax() barrier()
+
+/* Special register access. */
+
+#define xtensa_set_sr(x, sr) \
+ ({ \
+ __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \
+ "a"((unsigned int)(x))); \
+ })
+
+#define xtensa_get_sr(sr) \
+ ({ \
+ unsigned int v; \
+ __asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
+ v; \
+ })
+
+#define xtensa_xsr(x, sr) \
+ ({ \
+ unsigned int __v__ = (unsigned int)(x); \
+ __asm__ __volatile__ ("xsr %0, " __stringify(sr) : "+a"(__v__)); \
+ __v__; \
+ })
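+/*
+ * Usage sketch: sr is a special register name, or an SREG_* base plus
+ * index for numbered registers, e.g.
+ *
+ *   unsigned int ps = xtensa_get_sr(ps);
+ *   unsigned int old = xtensa_xsr(new_value, excsave1);
+ *
+ * xtensa_xsr() swaps the operand and the special register in a single
+ * xsr instruction, so the exchange is atomic with respect to interrupts.
+ */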
+
+#if XCHAL_HAVE_EXTERN_REGS
+
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+ asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+ register unsigned long value;
+ asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+ return value;
+}
+
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_PROCESSOR_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
new file mode 100644
index 000000000..a27046755
--- /dev/null
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,119 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+#include <asm/kmem_layout.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Kernel stack
+ *
+ *             +-----------------------+ -------- STACK_SIZE
+ *             |     register file     |        |
+ *             +-----------------------+        |
+ *             |    struct pt_regs     |        |
+ *             +-----------------------+        | ------ PT_REGS_OFFSET
+ * double      : 16 bytes spill area   :        |   ^
+ * exception   :- - - - - - - - - - - -:        |   |
+ * frame       :    struct pt_regs     :        |   |
+ *             :- - - - - - - - - - - -:        |   |
+ *             |                       |        |   |
+ *             |     memory stack      |        |   |
+ *             |                       |        |   |
+ *             ~                       ~        ~   ~
+ *             ~                       ~        ~   ~
+ *             |                       |        |   |
+ *             |                       |        |   |
+ *             +-----------------------+        |   | --- STACK_BIAS
+ *             |  struct task_struct   |        |   |  ^
+ * current --> +-----------------------+        |   |  |
+ *             |  struct thread_info   |        |   |  |
+ *             +-----------------------+ --------
+ */
+
+#define NO_SYSCALL (-1)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/coprocessor.h>
+#include <asm/core.h>
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long pc; /* 4 */
+ unsigned long ps; /* 8 */
+ unsigned long depc; /* 12 */
+ unsigned long exccause; /* 16 */
+ unsigned long excvaddr; /* 20 */
+ unsigned long debugcause; /* 24 */
+ unsigned long wmask; /* 28 */
+ unsigned long lbeg; /* 32 */
+ unsigned long lend; /* 36 */
+ unsigned long lcount; /* 40 */
+ unsigned long sar; /* 44 */
+ unsigned long windowbase; /* 48 */
+ unsigned long windowstart; /* 52 */
+ unsigned long syscall; /* 56 */
+ unsigned long icountlevel; /* 60 */
+ unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
+
+ /* Additional configurable registers that are used by the compiler. */
+ xtregs_opt_t xtregs_opt;
+
+ /* Make sure the areg field is 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+
+ /* current register frame.
+ * Note: The ESF for kernel exceptions ends after 16 registers!
+ */
+ unsigned long areg[XCHAL_NUM_AREGS];
+};
+
+# define arch_has_single_step() (1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+# define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+ (regs)->areg[1]))
+
+# ifndef CONFIG_SMP
+# define profile_pc(regs) instruction_pointer(regs)
+# else
+# define profile_pc(regs) \
+ ({ \
+ in_lock_functions(instruction_pointer(regs)) ? \
+ return_pointer(regs) : instruction_pointer(regs); \
+ })
+# endif
+
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->areg[2];
+}
+
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+#else /* __ASSEMBLY__ */
+
+# include <asm/asm-offsets.h>
+#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
new file mode 100644
index 000000000..ce184e7de
--- /dev/null
+++ b/arch/xtensa/include/asm/regs.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef _XTENSA_REGS_H
+#define _XTENSA_REGS_H
+
+/* Special registers. */
+
+#define SREG_MR 32
+#define SREG_IBREAKENABLE 96
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244
+
+/* EXCCAUSE register fields */
+
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+
+#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
+#define EXCCAUSE_SYSTEM_CALL 1
+#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
+#define EXCCAUSE_LOAD_STORE_ERROR 3
+#define EXCCAUSE_LEVEL1_INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
+#define EXCCAUSE_SPECULATION 7
+#define EXCCAUSE_PRIVILEGED 8
+#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
+#define EXCCAUSE_ITLB_MISS 16
+#define EXCCAUSE_ITLB_MULTIHIT 17
+#define EXCCAUSE_ITLB_PRIVILEGE 18
+#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
+#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
+#define EXCCAUSE_DTLB_MISS 24
+#define EXCCAUSE_DTLB_MULTIHIT 25
+#define EXCCAUSE_DTLB_PRIVILEGE 26
+#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
+#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
+#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
+#define EXCCAUSE_COPROCESSOR0_DISABLED 32
+#define EXCCAUSE_COPROCESSOR1_DISABLED 33
+#define EXCCAUSE_COPROCESSOR2_DISABLED 34
+#define EXCCAUSE_COPROCESSOR3_DISABLED 35
+#define EXCCAUSE_COPROCESSOR4_DISABLED 36
+#define EXCCAUSE_COPROCESSOR5_DISABLED 37
+#define EXCCAUSE_COPROCESSOR6_DISABLED 38
+#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
+
+/* PS register fields. */
+
+#define PS_WOE_BIT 18
+#define PS_WOE_MASK 0x00040000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_OWB_SHIFT 8
+#define PS_OWB_WIDTH 4
+#define PS_OWB_MASK 0x00000F00
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_UM_BIT 5
+#define PS_EXCM_BIT 4
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
+#define PS_INTLEVEL_MASK 0x0000000F
+
+/* DBREAKCn register fields. */
+
+#define DBREAKC_MASK_BIT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOAD_BIT 30
+#define DBREAKC_LOAD_MASK 0x40000000
+#define DBREAKC_STOR_BIT 31
+#define DBREAKC_STOR_MASK 0x80000000
+
+/* DEBUGCAUSE register fields. */
+
+#define DEBUGCAUSE_DBNUM_MASK 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8 /* First bit of DBNUM field */
+#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
+#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
+
+#endif /* _XTENSA_REGS_H */
diff --git a/arch/xtensa/include/asm/seccomp.h b/arch/xtensa/include/asm/seccomp.h
new file mode 100644
index 000000000..f1cb6b0a9
--- /dev/null
+++ b/arch/xtensa/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_XTENSA
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "xtensa"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h
new file mode 100644
index 000000000..e5da6d709
--- /dev/null
+++ b/arch/xtensa/include/asm/sections.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _XTENSA_SECTIONS_H
+#define _XTENSA_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#ifdef CONFIG_VECTORS_ADDR
+extern char _WindowVectors_text_start[];
+extern char _WindowVectors_text_end[];
+extern char _DebugInterruptVector_text_start[];
+extern char _DebugInterruptVector_text_end[];
+extern char _KernelExceptionVector_text_start[];
+extern char _KernelExceptionVector_text_end[];
+extern char _UserExceptionVector_text_start[];
+extern char _UserExceptionVector_text_end[];
+extern char _DoubleExceptionVector_text_start[];
+extern char _DoubleExceptionVector_text_end[];
+extern char _exception_text_start[];
+extern char _exception_text_end[];
+extern char _Level2InterruptVector_text_start[];
+extern char _Level2InterruptVector_text_end[];
+extern char _Level3InterruptVector_text_start[];
+extern char _Level3InterruptVector_text_end[];
+extern char _Level4InterruptVector_text_start[];
+extern char _Level4InterruptVector_text_end[];
+extern char _Level5InterruptVector_text_start[];
+extern char _Level5InterruptVector_text_end[];
+extern char _Level6InterruptVector_text_start[];
+extern char _Level6InterruptVector_text_end[];
+#endif
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
+extern char _SecondaryResetVector_text_start[];
+extern char _SecondaryResetVector_text_end[];
+#endif
+#ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_VECTORS_ADDR
+extern char _xip_text_start[];
+extern char _xip_text_end[];
+#endif
+extern char _xip_start[];
+extern char _xip_end[];
+#endif
+
+#endif
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
new file mode 100644
index 000000000..a8a249326
--- /dev/null
+++ b/arch/xtensa/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <platform/serial.h>
+
+#endif /* _XTENSA_SERIAL_H */
diff --git a/arch/xtensa/include/asm/shmparam.h b/arch/xtensa/include/asm/shmparam.h
new file mode 100644
index 000000000..c8cc16c3d
--- /dev/null
+++ b/arch/xtensa/include/asm/shmparam.h
@@ -0,0 +1,21 @@
+/*
+ * include/asm-xtensa/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_SHMPARAM_H
+#define _XTENSA_SHMPARAM_H
+
+/*
+ * Xtensa can have variable size caches, and if
+ * the size of single way is larger than the page size,
+ * then we have to start worrying about cache aliasing
+ * problems.
+ */
+
+#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
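+/*
+ * Example: with 4 kB pages and a 16 kB dcache way, SHMLBA is 16 kB, so
+ * every user mapping of a shared segment starts at the same dcache
+ * color and the mappings cannot alias each other.
+ */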
+
+#endif /* _XTENSA_SHMPARAM_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
new file mode 100644
index 000000000..de169b4ea
--- /dev/null
+++ b/arch/xtensa/include/asm/signal.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_SIGNAL_H
+#define _XTENSA_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#ifndef __ASSEMBLY__
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
new file mode 100644
index 000000000..e446e6fc4
--- /dev/null
+++ b/arch/xtensa/include/asm/smp.h
@@ -0,0 +1,44 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SMP_H
+#define _XTENSA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define cpu_logical_map(cpu) (cpu)
+
+struct start_info {
+ unsigned long stack;
+};
+extern struct start_info start_info;
+
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void secondary_start_kernel(void);
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void __noreturn cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
+
+#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
new file mode 100644
index 000000000..41c449ece
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/spinlock.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SPINLOCK_H
+#define _XTENSA_SPINLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+#define smp_mb__after_spinlock() smp_mb()
+
+#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
new file mode 100644
index 000000000..797aed7df
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
+# error "please don't include this file directly"
+#endif
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000..dd10279a2
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,33 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary = get_random_canary();
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 000000000..a85e785a6
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,44 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long sp;
+
+ if (!task || task == current)
+ sp = current_stack_pointer;
+ else
+ sp = task->thread.sp;
+
+ return (unsigned long *)sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
new file mode 100644
index 000000000..ffce43513
--- /dev/null
+++ b/arch/xtensa/include/asm/string.h
@@ -0,0 +1,137 @@
+/*
+ * include/asm-xtensa/string.h
+ *
+ * These trivial string functions are considered part of the public domain.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
+
+#ifndef _XTENSA_STRING_H
+#define _XTENSA_STRING_H
+
+#define __HAVE_ARCH_STRCPY
+static inline char *strcpy(char *__dest, const char *__src)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ __asm__ __volatile__("1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "bnez %2, 1b\n\t"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ if (__n == 0)
+ return __xdest;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "bne %1, %5, 1b\n"
+ "2:"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char *__cs, const char *__ct)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "mov %2, %3\n"
+ "1:\n\t"
+ "beq %0, %6, 2f\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beqz %3, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
+#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 000000000..6b73bf0eb
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last) \
+do { \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
new file mode 100644
index 000000000..5ee974bf8
--- /dev/null
+++ b/arch/xtensa/include/asm/syscall.h
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+#include <uapi/linux/audit.h>
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_XTENSA;
+}
+
+typedef void (*syscall_t)(void);
+extern syscall_t sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* Do nothing. */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->areg[2]) ? regs->areg[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->areg[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->areg[2] = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 6
+#define XTENSA_SYSCALL_ARGUMENT_REGS {6, 3, 4, 5, 8, 9}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+ unsigned int i;
+
+ for (i = 0; i < 6; ++i)
+ args[i] = regs->areg[reg[i]];
+}
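+/*
+ * In other words (sketch): arguments 0..5 arrive in a6, a3, a4, a5, a8
+ * and a9, so for write(fd, buf, count) the handler finds fd in
+ * regs->areg[6], buf in regs->areg[3] and count in regs->areg[4].
+ */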
+
+asmlinkage long xtensa_rt_sigreturn(void);
+asmlinkage long xtensa_shmat(int, char __user *, int);
+asmlinkage long xtensa_fadvise64_64(int, int,
+ unsigned long long, unsigned long long);
+
+#endif
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644
index 000000000..552cdfd85
--- /dev/null
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -0,0 +1,19 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#include <linux/memblock.h>
+
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
new file mode 100644
index 000000000..326db1c1d
--- /dev/null
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -0,0 +1,145 @@
+/*
+ * include/asm-xtensa/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_THREAD_INFO_H
+#define _XTENSA_THREAD_INFO_H
+
+#include <linux/stringify.h>
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+#ifndef __ASSEMBLY__
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct xtregs_coprocessor {
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+} xtregs_coprocessor_t;
+
+#endif
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
+
+#if XCHAL_HAVE_EXCLUSIVE
+ /* result of the most recent exclusive store */
+ unsigned long atomctl8;
+#endif
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /* Address where PS.WOE was enabled by the ABI probing code */
+ unsigned long ps_woe_fix_addr;
+#endif
+
+ /*
+ * If the i-th bit is set, then the coprocessor state is loaded into
+ * coprocessor i on CPU cp_owner_cpu.
+ */
+ unsigned long cpenable;
+ u32 cp_owner_cpu;
+ /* Allocate storage for extra user states and coprocessor states. */
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t xtregs_cp;
+#endif
+ xtregs_user_t xtregs_user;
+};
+
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
+ "xor %0, a1, %0" : "=&r" (ti) : );
+ return ti;
+}
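+/*
+ * The extui/xor pair above is equivalent to
+ *
+ *   (struct thread_info *)(sp & ~(THREAD_SIZE - 1))
+ *
+ * extui extracts the low CURRENT_SHIFT bits of a1 and the xor clears
+ * them, rounding the stack pointer down to the stack base where
+ * thread_info lives.
+ */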
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg,sp) \
+ extui reg, sp, 0, CURRENT_SHIFT; \
+ xor reg, sp, reg
+#endif
+
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
+#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
+#define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
+#define TIF_SECCOMP 10 /* secure computing */
+#define TIF_MEMDIE 11 /* is terminating due to OOM killer */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+
+#define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+ _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
+
+#endif	/* _XTENSA_THREAD_INFO_H */
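
The extui/xor pair in current_thread_info() works because the kernel stack is KERNEL_STACK_SIZE-aligned and struct thread_info sits at its base: extui extracts the low CURRENT_SHIFT bits of the stack pointer (a1), and xor then clears exactly those bits. A plain-C sketch of the same masking, with the stack pointer passed in explicitly:

    /* Equivalent of the assembly trick: round sp down to the stack base. */
    static inline struct thread_info *ti_from_sp(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~((1UL << CURRENT_SHIFT) - 1));
    }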
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
new file mode 100644
index 000000000..3f2462f2d
--- /dev/null
+++ b/arch/xtensa/include/asm/timex.h
@@ -0,0 +1,60 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TIMEX_H
+#define _XTENSA_TIMEX_H
+
+#include <asm/processor.h>
+
+#if XCHAL_NUM_TIMERS > 0 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 0
+# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 1 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 1
+# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 2 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 2
+# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
+#else
+# error "Bad timer number for Linux configurations!"
+#endif
+
+extern unsigned long ccount_freq;
+
+void local_timer_setup(unsigned cpu);
+
+/*
+ * Register access.
+ */
+
+static inline unsigned long get_ccount (void)
+{
+ return xtensa_get_sr(ccount);
+}
+
+static inline void set_ccount (unsigned long ccount)
+{
+ xtensa_set_sr(ccount, ccount);
+}
+
+static inline unsigned long get_linux_timer (void)
+{
+ return xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER);
+}
+
+static inline void set_linux_timer (unsigned long ccompare)
+{
+ xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+}
+
+#include <asm-generic/timex.h>
+
+#endif /* _XTENSA_TIMEX_H */
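
CCOUNT free-runs at ccount_freq, and the selected CCOMPARE register raises LINUX_TIMER_INT when CCOUNT reaches its value, so re-arming the tick is a read-add-write. A hedged sketch (the 1 ms period and helper name are illustrative):

    /* Arm the Linux timer to fire about one millisecond from now,
     * assuming ccount_freq holds the CCOUNT frequency in Hz.
     */
    static inline void arm_timer_in_1ms(void)
    {
            set_linux_timer(get_ccount() + ccount_freq / 1000);
    }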
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
new file mode 100644
index 000000000..8c3ceb427
--- /dev/null
+++ b/arch/xtensa/include/asm/tlb.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLB_H
+#define _XTENSA_TLB_H
+
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+
+void check_tlb_sanity(void);
+
+#endif /* _XTENSA_TLB_H */
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
new file mode 100644
index 000000000..573df8cea
--- /dev/null
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLBFLUSH_H
+#define _XTENSA_TLBFLUSH_H
+
+#include <linux/stringify.h>
+#include <asm/processor.h>
+
+#define DTLB_WAY_PGD 7
+
+#define ITLB_ARF_WAYS 4
+#define DTLB_ARF_WAYS 4
+
+#define ITLB_HIT_BIT 3
+#define DTLB_HIT_BIT 4
+
+#ifndef __ASSEMBLY__
+
+/* TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the TLB entries of the specified mm context
+ * - flush_tlb_page(vma, page) flushes a single page
+ * - flush_tlb_range(vma, vmaddr, end) flushes a range of pages
+ */
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
+ end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
+
+#endif /* CONFIG_SMP */
+
+/* TLB operations. */
+
+static inline unsigned long itlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline unsigned long dtlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline void invalidate_itlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
+}
+
+static inline void invalidate_dtlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
+}
+
+/* Use the .._no_isync functions with caution. Generally, these are
+ * handy for bulk invalidates followed by a single 'isync'. The
+ * caller must follow up with an 'isync', which can be relatively
+ * expensive on some Xtensa implementations.
+ */
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
+}
+
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
+}
+
+static inline void set_itlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_dtlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_ptevaddr_register (unsigned long val)
+{
+ __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
+ : : "a" (val));
+}
+
+static inline unsigned long read_ptevaddr_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void write_dtlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void write_itlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("witlb %1, %0; isync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void invalidate_page_directory (void)
+{
+ invalidate_dtlb_entry (DTLB_WAY_PGD);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+1);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+2);
+}
+
+static inline void invalidate_itlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
+ invalidate_itlb_entry(tlb_entry);
+}
+
+static inline void invalidate_dtlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
+ invalidate_dtlb_entry(tlb_entry);
+}
+
+/*
+ * DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
+ * ISA and exist only for test purposes.
+ * You may find them helpful for MMU debugging, however.
+ *
+ * 'at' is the unmodified input register
+ * 'as' is the output register, as follows (specific to the Linux config):
+ *
+ * as[31..12] contain the virtual address
+ * as[11..08] are meaningless
+ * as[07..00] contain the asid
+ */
+
+static inline unsigned long read_dtlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_dtlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_TLBFLUSH_H */
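
The *_no_isync variants exist so that bulk invalidation can defer pipeline synchronization: issue a run of iitlb/idtlb operations, then pay for a single isync at the end. A sketch of that pattern (the entry encoding is simplified; real callers compute proper way/index values):

    /* Invalidate a batch of ITLB entries, then synchronize once. */
    static inline void invalidate_itlb_batch(unsigned first, unsigned count)
    {
            unsigned i;

            for (i = 0; i < count; i++)
                    invalidate_itlb_entry_no_isync(first + i);
            __asm__ __volatile__("isync");
    }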
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000..212c3b9ff
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,145 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+typedef void xtensa_exception_handler(struct pt_regs *regs);
+
+/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+#if XTENSA_HAVE_COPROCESSORS
+ /* Pointers to owner struct thread_info */
+ struct thread_info *coprocessor_owner[XCHAL_CP_MAX];
+#endif
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ xtensa_exception_handler *default_handler[EXCCAUSE_N];
+};
+
+DECLARE_PER_CPU(struct exc_table, exc_table);
+
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler);
+
+asmlinkage void fast_illegal_instruction_user(void);
+asmlinkage void fast_syscall_user(void);
+asmlinkage void fast_alloca(void);
+asmlinkage void fast_load_store(void);
+asmlinkage void fast_unaligned(void);
+asmlinkage void fast_second_level_miss(void);
+asmlinkage void fast_store_prohibited(void);
+asmlinkage void fast_coprocessor(void);
+
+asmlinkage void kernel_exception(void);
+asmlinkage void user_exception(void);
+asmlinkage void system_call(struct pt_regs *regs);
+
+void do_IRQ(int hwirq, struct pt_regs *regs);
+void do_page_fault(struct pt_regs *regs);
+void do_unhandled(struct pt_regs *regs);
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table init_exc_table __initdata = {
+#ifdef CONFIG_XTENSA_LOAD_STORE
+ .fast_kernel_handler[EXCCAUSE_LOAD_STORE_ERROR] =
+ fast_load_store,
+#endif
+#ifdef CONFIG_MMU
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+#endif
+ };
+ xtensa_set_sr(&init_exc_table, excsave1);
+}
+
+void secondary_trap_init(void);
+
+static inline void spill_registers(void)
+{
+#if defined(__XTENSA_WINDOWED_ABI__)
+#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+ " call8 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
+ " _entry a1, 48\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+ " _entry a1, 16\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
+#endif
+ " retw\n"
+#endif
+ "2:\n"
+ : : : "a8", "a9", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
+#endif
+}
+
+struct debug_table {
+ /* Pointer to debug exception handler */
+ void (*debug_exception)(void);
+ /* Temporary register save area */
+ unsigned long debug_save[1];
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Save area for DBREAKC registers */
+ unsigned long dbreakc_save[XCHAL_NUM_DBREAK];
+ /* Saved ICOUNT register */
+ unsigned long icount_save;
+ /* Saved ICOUNTLEVEL register */
+ unsigned long icount_level_save;
+#endif
+};
+
+void debug_exception(void);
+
+#endif /* _XTENSA_TRAPS_H */
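
trap_set_handler() installs a default C handler for one exception cause and hands back the previous one, which makes boot-time chaining straightforward. A sketch, assuming the standard EXCCAUSE_ILLEGAL_INSTRUCTION constant from the core headers:

    static xtensa_exception_handler *chained_handler;

    static void my_illegal_insn_handler(struct pt_regs *regs)
    {
            /* Inspect regs here; otherwise delegate to the old handler. */
            if (chained_handler)
                    chained_handler(regs);
    }

    static void __init install_my_handler(void)
    {
            chained_handler = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION,
                                               my_illegal_insn_handler);
    }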
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
new file mode 100644
index 000000000..56aec6d50
--- /dev/null
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -0,0 +1,295 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access functions to the user memory
+ * space for the kernel, such as get_user()/put_user(), clear_user(),
+ * and the raw copy helpers behind copy_{to,from}_user().
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UACCESS_H
+#define _XTENSA_UACCESS_H
+
+#include <linux/prefetch.h>
+#include <asm/types.h>
+#include <asm/extable.h>
+#include <asm-generic/access_ok.h>
+
+/*
+ * These are the main single-value transfer routines. They
+ * automatically use the right size if we just have the right pointer
+ * type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in
+ * "get_user()" and yet we don't want to do any pointers, because that
+ * is too much of a performance impact. Thus we have a few rather ugly
+ * macros here, and hide all the uglyness from the user.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (access_ok(__pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
+ case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
+ case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
+ case 8: { \
+ __typeof__(*ptr) __v64 = x; \
+ retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0; \
+ break; \
+ } \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * Consider the case where a single user load/store causes both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the
+ * user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
+ * sync.
+ */
+
+#define __check_align_1 ""
+
+#define __check_align_2 \
+ " _bbci.l %[mem] * 0, 1f \n" \
+ " movi %[err], %[efault] \n" \
+ " _j 2f \n"
+
+#define __check_align_4 \
+ " _bbsi.l %[mem] * 0, 0f \n" \
+ " _bbci.l %[mem] * 0 + 1, 1f \n" \
+ "0: movi %[err], %[efault] \n" \
+ " _j 2f \n"
+
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[mem] \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+ :[x] "r"(x_), [efault] "i"(-EFAULT))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr)))0; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
+ case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
+ case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
+ case 8: { \
+ u64 __x; \
+ if (unlikely(__copy_from_user(&__x, ptr, 8))) { \
+ retval = -EFAULT; \
+ (x) = (__typeof__(*(ptr)))0; \
+ } else { \
+ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
+ } \
+ break; \
+ } \
+ default: \
+ (x) = (__typeof__(*(ptr)))0; \
+ __get_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do { \
+ u32 __x = 0; \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %[x], %[mem] \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ " .literal_position \n" \
+ "5: \n" \
+ " movi %[tmp], 2b \n" \
+ " movi %[err], %[efault] \n" \
+ " jx %[tmp] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+ :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
+ (x_) = (__force __typeof__(*(addr_)))__x; \
+} while (0)
+
+
+/*
+ * Copy to/from user space
+ */
+
+extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ return __xtensa_copy_user((__force void *)to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * We need to return the number of bytes not cleared. Our memset()
+ * returns zero if a problem occurs while accessing user-space memory.
+ * In that event, report that nothing was cleared by returning the
+ * full size; otherwise return zero for success.
+ */
+
+static inline unsigned long
+__xtensa_clear_user(void __user *addr, unsigned long size)
+{
+ if (!__memset((void __force *)addr, 0, size))
+ return size;
+ return 0;
+}
+
+static inline unsigned long
+clear_user(void __user *addr, unsigned long size)
+{
+ if (access_ok(addr, size))
+ return __xtensa_clear_user(addr, size);
+ return size ? -EFAULT : 0;
+}
+
+#define __clear_user __xtensa_clear_user
+
+
+#ifdef CONFIG_ARCH_HAS_STRNCPY_FROM_USER
+extern long __strncpy_user(char *dst, const char __user *src, long count);
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (access_ok(src, 1))
+ return __strncpy_user(dst, src, count);
+ return -EFAULT;
+}
+#else
+long strncpy_from_user(char *dst, const char __user *src, long count);
+#endif
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char __user *str, long len);
+
+static inline long strnlen_user(const char __user *str, long len)
+{
+ if (!access_ok(str, 1))
+ return 0;
+ return __strnlen_user(str, len);
+}
+
+#endif /* _XTENSA_UACCESS_H */
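
Typical usage of the checked accessors from syscall-level code: each returns 0 on success or -EFAULT, and this implementation zeroes get_user()'s destination on failure. A minimal sketch with a hypothetical helper:

    /* Read a user integer, double it, and write it back. */
    static long double_user_int(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;
            if (put_user(val * 2, uptr))
                    return -EFAULT;
            return 0;
    }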
diff --git a/arch/xtensa/include/asm/ucontext.h b/arch/xtensa/include/asm/ucontext.h
new file mode 100644
index 000000000..94c94ed3e
--- /dev/null
+++ b/arch/xtensa/include/asm/ucontext.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/ucontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UCONTEXT_H
+#define _XTENSA_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _XTENSA_UCONTEXT_H */
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
new file mode 100644
index 000000000..b52236245
--- /dev/null
+++ b/arch/xtensa/include/asm/unistd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_GETPGRP
+
+#define NR_syscalls __NR_syscalls
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000..704286c35
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,93 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <asm/core.h>
+#include <asm/kmem_layout.h>
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
+#else
+#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
+ XCHAL_KSEG_CACHED_VADDR - \
+ XCHAL_KSEG_PADDR)
+#endif
+#else
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+#endif
+
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#ifdef CONFIG_VECTORS_ADDR
+#define VECBASE_VADDR (CONFIG_VECTORS_ADDR)
+#else
+#define VECBASE_VADDR _vecbase
+#endif
+
+#if XCHAL_HAVE_VECBASE
+
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
+
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL7_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
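
For a feel of the KERNELOFFSET arithmetic above: it translates the physical load address into the cached KSEG mapping. A worked example, assuming the common (configuration-dependent) defaults:

    /*
     * Assumed: XCHAL_KSEG_CACHED_VADDR    = 0xd0000000
     *          XCHAL_KSEG_PADDR           = 0x00000000
     *          CONFIG_KERNEL_LOAD_ADDRESS = 0x00003000
     *
     * KERNELOFFSET = 0x00003000 + 0xd0000000 - 0x00000000 = 0xd0003000,
     * i.e. the kernel is linked at its load address re-expressed in
     * cached KSEG virtual space.
     */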
diff --git a/arch/xtensa/include/asm/vermagic.h b/arch/xtensa/include/asm/vermagic.h
new file mode 100644
index 000000000..6d9c670e4
--- /dev/null
+++ b/arch/xtensa/include/asm/vermagic.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+#include <variant/core.h>
+
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/xtensa/include/asm/vmalloc.h b/arch/xtensa/include/asm/vmalloc.h
new file mode 100644
index 000000000..0eb94b70b
--- /dev/null
+++ b/arch/xtensa/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_XTENSA_VMALLOC_H
+#define _ASM_XTENSA_VMALLOC_H
+
+#endif /* _ASM_XTENSA_VMALLOC_H */