author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/xtensa/include/asm
parent    Initial commit. (diff)
Adding upstream version 4.19.249. (upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/xtensa/include/asm')
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 30
-rw-r--r--  arch/xtensa/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/xtensa/include/asm/asm-uaccess.h | 157
-rw-r--r--  arch/xtensa/include/asm/asmmacro.h | 194
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 205
-rw-r--r--  arch/xtensa/include/asm/barrier.h | 21
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 237
-rw-r--r--  arch/xtensa/include/asm/bootparam.h | 50
-rw-r--r--  arch/xtensa/include/asm/bugs.h | 18
-rw-r--r--  arch/xtensa/include/asm/cache.h | 34
-rw-r--r--  arch/xtensa/include/asm/cacheasm.h | 214
-rw-r--r--  arch/xtensa/include/asm/cacheflush.h | 182
-rw-r--r--  arch/xtensa/include/asm/checksum.h | 254
-rw-r--r--  arch/xtensa/include/asm/cmpxchg.h | 162
-rw-r--r--  arch/xtensa/include/asm/coprocessor.h | 177
-rw-r--r--  arch/xtensa/include/asm/current.h | 38
-rw-r--r--  arch/xtensa/include/asm/delay.h | 75
-rw-r--r--  arch/xtensa/include/asm/dma.h | 62
-rw-r--r--  arch/xtensa/include/asm/elf.h | 207
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 85
-rw-r--r--  arch/xtensa/include/asm/flat.h | 24
-rw-r--r--  arch/xtensa/include/asm/ftrace.h | 40
-rw-r--r--  arch/xtensa/include/asm/futex.h | 127
-rw-r--r--  arch/xtensa/include/asm/highmem.h | 100
-rw-r--r--  arch/xtensa/include/asm/hw_breakpoint.h | 61
-rw-r--r--  arch/xtensa/include/asm/hw_irq.h | 14
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h | 216
-rw-r--r--  arch/xtensa/include/asm/io.h | 85
-rw-r--r--  arch/xtensa/include/asm/irq.h | 42
-rw-r--r--  arch/xtensa/include/asm/irqflags.h | 82
-rw-r--r--  arch/xtensa/include/asm/kasan.h | 39
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h | 75
-rw-r--r--  arch/xtensa/include/asm/linkage.h | 9
-rw-r--r--  arch/xtensa/include/asm/mmu.h | 22
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h | 159
-rw-r--r--  arch/xtensa/include/asm/module.h | 20
-rw-r--r--  arch/xtensa/include/asm/mxregs.h | 46
-rw-r--r--  arch/xtensa/include/asm/nommu_context.h | 34
-rw-r--r--  arch/xtensa/include/asm/page.h | 199
-rw-r--r--  arch/xtensa/include/asm/pci-bridge.h | 88
-rw-r--r--  arch/xtensa/include/asm/pci.h | 53
-rw-r--r--  arch/xtensa/include/asm/perf_event.h | 4
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 85
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 454
-rw-r--r--  arch/xtensa/include/asm/platform.h | 105
-rw-r--r--  arch/xtensa/include/asm/processor.h | 250
-rw-r--r--  arch/xtensa/include/asm/ptrace.h | 110
-rw-r--r--  arch/xtensa/include/asm/regs.h | 117
-rw-r--r--  arch/xtensa/include/asm/segment.h | 16
-rw-r--r--  arch/xtensa/include/asm/serial.h | 18
-rw-r--r--  arch/xtensa/include/asm/shmparam.h | 21
-rw-r--r--  arch/xtensa/include/asm/signal.h | 23
-rw-r--r--  arch/xtensa/include/asm/smp.h | 43
-rw-r--r--  arch/xtensa/include/asm/spinlock.h | 199
-rw-r--r--  arch/xtensa/include/asm/spinlock_types.h | 21
-rw-r--r--  arch/xtensa/include/asm/stackprotector.h | 40
-rw-r--r--  arch/xtensa/include/asm/stacktrace.h | 44
-rw-r--r--  arch/xtensa/include/asm/string.h | 140
-rw-r--r--  arch/xtensa/include/asm/switch_to.h | 22
-rw-r--r--  arch/xtensa/include/asm/syscall.h | 27
-rw-r--r--  arch/xtensa/include/asm/sysmem.h | 19
-rw-r--r--  arch/xtensa/include/asm/thread_info.h | 135
-rw-r--r--  arch/xtensa/include/asm/timex.h | 70
-rw-r--r--  arch/xtensa/include/asm/tlb.h | 47
-rw-r--r--  arch/xtensa/include/asm/tlbflush.h | 208
-rw-r--r--  arch/xtensa/include/asm/traps.h | 120
-rw-r--r--  arch/xtensa/include/asm/types.h | 23
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 311
-rw-r--r--  arch/xtensa/include/asm/ucontext.h | 22
-rw-r--r--  arch/xtensa/include/asm/unaligned.h | 29
-rw-r--r--  arch/xtensa/include/asm/unistd.h | 25
-rw-r--r--  arch/xtensa/include/asm/user.h | 20
-rw-r--r--  arch/xtensa/include/asm/vectors.h | 125
-rw-r--r--  arch/xtensa/include/asm/vga.h | 19
74 files changed, 6850 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
new file mode 100644
index 000000000..82c756431
--- /dev/null
+++ b/arch/xtensa/include/asm/Kbuild
@@ -0,0 +1,30 @@
+generic-y += bug.h
+generic-y += compat.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += extable.h
+generic-y += fb.h
+generic-y += hardirq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += rwsem.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
new file mode 100644
index 000000000..dfdf9fae1
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -0,0 +1,157 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access functions to user memory space
+ * for the kernel. This header provides the assembly-level macros
+ * (get_fs, set_fs, kernel_ok, user_ok, access_ok) that mirror the
+ * C versions in asm/uaccess.h.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASM_UACCESS_H
+#define _XTENSA_ASM_UACCESS_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+/*
+ * These assembly macros mirror the C macros in asm/uaccess.h. They
+ * should always have identical functionality. See
+ * arch/xtensa/kernel/sys.S for usage.
+ */
+
+#define KERNEL_DS 0
+#define USER_DS 1
+
+#define get_ds (KERNEL_DS)
+
+/*
+ * get_fs reads current->thread.current_ds into a register.
+ * On Entry:
+ * <ad> anything
+ * <sp> stack
+ * On Exit:
+ * <ad> contains current->thread.current_ds
+ */
+ .macro get_fs ad, sp
+ GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+ addi \ad, \ad, TASK_THREAD
+ l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
+ l32i \ad, \ad, THREAD_CURRENT_DS
+#endif
+ .endm
+
+/*
+ * set_fs sets current->thread.current_ds to some value.
+ * On Entry:
+ * <at> anything (temp register)
+ * <av> value to write
+ * <sp> stack
+ * On Exit:
+ * <at> destroyed (actually, current)
+ * <av> preserved, value to write
+ */
+ .macro set_fs at, av, sp
+ GET_CURRENT(\at,\sp)
+ s32i \av, \at, THREAD_CURRENT_DS
+ .endm
+
+/*
+ * kernel_ok determines whether we should bypass addr/size checking.
+ * See the equivalent C-macro version below for clarity.
+ * On success, kernel_ok branches to a label indicated by parameter
+ * <success>. This implies that the macro falls through to the next
+ * instruction on an error.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on error).
+ *
+ * On Entry:
+ * <at> anything (temp register)
+ * <success> label to branch to on success; implies
+ * fall-through macro on error
+ * <sp> stack pointer
+ * On Exit:
+ * <at> destroyed (actually, current->thread.current_ds)
+ */
+
+#if ((KERNEL_DS != 0) || (USER_DS == 0))
+# error Assembly macro kernel_ok fails
+#endif
+ .macro kernel_ok at, sp, success
+ get_fs \at, \sp
+ beqz \at, \success
+ .endm
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C-macro version below for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed (actually, (TASK_SIZE + 1 - size))
+ */
+ .macro user_ok aa, as, at, error
+ movi \at, __XTENSA_UL_CONST(TASK_SIZE)
+ bgeu \as, \at, \error
+ sub \at, \at, \as
+ bgeu \aa, \at, \error
+ .endm
+
+/*
+ * access_ok determines whether a memory access is allowed. See the
+ * equivalent C-macro version below for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <sp> stack pointer
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro access_ok aa, as, at, sp, error
+ kernel_ok \at, \sp, .Laccess_ok_\@
+ user_ok \aa, \as, \at, \error
+.Laccess_ok_\@:
+ .endm
+
+#endif /* _XTENSA_ASM_UACCESS_H */
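The range check that user_ok encodes above has a direct C analogue. Below is a minimal
stand-alone sketch, with an assumed TASK_SIZE value and a hypothetical helper name
(the in-kernel C version lives in asm/uaccess.h):

    #include <stdbool.h>

    #define TASK_SIZE 0x40000000UL          /* assumed example value, not a real config */

    /* An access of `size` bytes at `addr` is allowed only if it lies entirely
     * below TASK_SIZE; the subtraction mirrors the assembly above and avoids
     * overflow in addr + size. */
    static bool user_range_ok(unsigned long addr, unsigned long size)
    {
            if (size >= TASK_SIZE)          /* bgeu \as, \at, \error */
                    return false;
            return addr < TASK_SIZE - size; /* bgeu \aa, \at, \error */
    }

kernel_ok simply short-circuits this check when current->thread.current_ds is
KERNEL_DS (0), which is why the macros above insist on KERNEL_DS == 0.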
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
new file mode 100644
index 000000000..7f2ae5872
--- /dev/null
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,194 @@
+/*
+ * include/asm-xtensa/asmmacro.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASMMACRO_H
+#define _XTENSA_ASMMACRO_H
+
+#include <variant/core.h>
+
+/*
+ * Some little helpers for loops. Use zero-overhead-loops
+ * where applicable and if supported by the processor.
+ *
+ * __loopi ar, at, size, inc
+ * ar register initialized with the start address
+ * at scratch register used by macro
+ * size size immediate value
+ * inc increment
+ *
+ * __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
+ * ar register initialized with the start address
+ * as register initialized with the size
+ * at scratch register used by macro
+ * inc_log2 increment [in log2]
+ * mask_log2 mask [in log2]
+ * cond true condition (used in loop'cond')
+ * ncond false condition (used in b'ncond')
+ *
+ * __loop as
+ * restart loop. 'as' register must not have been modified!
+ *
+ * __endla ar, as, incr
+ * ar start address (modified)
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
+ * incr increment
+ */
+
+/*
+ * loop for given size as immediate
+ */
+
+ .macro __loopi ar, at, size, incr
+
+#if XCHAL_HAVE_LOOPS
+ movi \at, ((\size + \incr - 1) / (\incr))
+ loop \at, 99f
+#else
+ addi \at, \ar, \size
+ 98:
+#endif
+
+ .endm
+
+/*
+ * loop for given size in register
+ */
+
+ .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
+
+#if XCHAL_HAVE_LOOPS
+ .ifgt \incr_log2 - 1
+ addi \at, \as, (1 << \incr_log2) - 1
+ .ifnc \mask_log2,
+ extui \at, \at, \incr_log2, \mask_log2
+ .else
+ srli \at, \at, \incr_log2
+ .endif
+ .endif
+ loop\cond \at, 99f
+#else
+ .ifnc \mask_log2,
+ extui \at, \as, \incr_log2, \mask_log2
+ .else
+ .ifnc \ncond,
+ srli \at, \as, \incr_log2
+ .endif
+ .endif
+ .ifnc \ncond,
+ b\ncond \at, 99f
+
+ .endif
+ .ifnc \mask_log2,
+ slli \at, \at, \incr_log2
+ add \at, \ar, \at
+ .else
+ add \at, \ar, \as
+ .endif
+#endif
+ 98:
+
+ .endm
+
+/*
+ * loop from ar to as
+ */
+
+ .macro __loopt ar, as, at, incr_log2
+
+#if XCHAL_HAVE_LOOPS
+ sub \at, \as, \ar
+ .ifgt \incr_log2 - 1
+ addi \at, \at, (1 << \incr_log2) - 1
+ srli \at, \at, \incr_log2
+ .endif
+ loop \at, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * restart loop. registers must be unchanged
+ */
+
+ .macro __loop as
+
+#if XCHAL_HAVE_LOOPS
+ loop \as, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * end of loop with no increment of the address.
+ */
+
+ .macro __endl ar, as
+#if !XCHAL_HAVE_LOOPS
+ bltu \ar, \as, 98b
+#endif
+ 99:
+ .endm
+
+/*
+ * end of loop with increment of the address.
+ */
+
+ .macro __endla ar, as, incr
+ addi \ar, \ar, \incr
+ __endl \ar \as
+ .endm
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
+ * word starting at r from two registers loaded from consecutive aligned
+ * addresses covering r regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
+
+#endif /* _XTENSA_ASMMACRO_H */
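What __ssa8 plus __src_b accomplish can be sketched in C for the little-endian case:
reconstruct the word at an unaligned address from the two aligned words covering it.
The helper below is purely illustrative (the kernel only ever does this in assembly)
and assumes both covering words are readable:

    #include <stdint.h>

    static uint32_t load_unaligned_le(const uint8_t *p)
    {
            uintptr_t base = (uintptr_t)p & ~(uintptr_t)3;  /* aligned covering words   */
            unsigned shift = ((uintptr_t)p & 3) * 8;        /* what __ssa8 puts in SAR  */
            uint32_t w0 = *(const uint32_t *)base;
            uint32_t w1 = *(const uint32_t *)(base + 4);

            if (shift == 0)
                    return w0;                              /* already aligned          */
            return (w0 >> shift) | (w1 << (32 - shift));    /* the src funnel shift     */
    }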
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
new file mode 100644
index 000000000..7de0149e1
--- /dev/null
+++ b/arch/xtensa/include/asm/atomic.h
@@ -0,0 +1,205 @@
+/*
+ * include/asm-xtensa/atomic.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting..
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ATOMIC_H
+#define _XTENSA_ATOMIC_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * This Xtensa implementation assumes that the right mechanism
+ * for exclusion is locking interrupts to level EXCM_LEVEL.
+ *
+ * Locking interrupts looks like this:
+ *
+ * rsil a15, TOPLEVEL
+ * <code>
+ * wsr a15, PS
+ * rsync
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) READ_ONCE((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#if XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ " " #op " %0, %0, %2\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15, "__stringify(TOPLEVEL)"\n"\
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned int tmp, vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %0, %3, 0\n" \
+ " " #op " %1, %0, %2\n" \
+ " s32i %1, %3, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_ATOMIC_H */
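The S32C1I retry loop emitted by ATOMIC_OP() has a straightforward C shape. Here is a
sketch using the GCC __sync compare-and-swap builtin, for illustration only (the kernel
relies on the inline assembly above, not on this builtin):

    static void atomic_add_sketch(int i, int *counter)
    {
            int old, seen;

            do {
                    old = *(volatile int *)counter;                  /* l32i + wsr scompare1 */
                    seen = __sync_val_compare_and_swap(counter,
                                                       old, old + i); /* s32c1i              */
            } while (seen != old);                                   /* bne: raced, retry    */
    }

On configurations without S32C1I, the second set of macros obtains the same effect by
raising the interrupt level to TOPLEVEL around a plain load/modify/store.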
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 000000000..956596e4d
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
new file mode 100644
index 000000000..d34901897
--- /dev/null
+++ b/arch/xtensa/include/asm/bitops.h
@@ -0,0 +1,237 @@
+/*
+ * include/asm-xtensa/bitops.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting etc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BITOPS_H
+#define _XTENSA_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/processor.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#if XCHAL_HAVE_NSA
+
+static inline unsigned long __cntlz (unsigned long x)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
+ return lz;
+}
+
+/*
+ * ffz: Find first zero in word. Undefined if no zero exists.
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+static inline int ffz(unsigned long x)
+{
+ return 31 - __cntlz(~x & -~x);
+}
+
+/*
+ * __ffs: Find first bit set in word. Return 0 for bit 0
+ */
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return 31 - __cntlz(x & -x);
+}
+
+/*
+ * ffs: Find first bit set in word. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static inline int ffs(unsigned long x)
+{
+ return 32 - __cntlz(x & -x);
+}
+
+/*
+ * fls: Find last (most-significant) bit set in word.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls (unsigned int x)
+{
+ return 32 - __cntlz(x);
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return 31 - __cntlz(word);
+}
+#else
+
+/* Use the generic implementation if we don't have the nsa/nsau instructions. */
+
+# include <asm-generic/bitops/ffs.h>
+# include <asm-generic/bitops/__ffs.h>
+# include <asm-generic/bitops/ffz.h>
+# include <asm-generic/bitops/fls.h>
+# include <asm-generic/bitops/__fls.h>
+
+#endif
+
+#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_BITOPS_H */
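The nsau-based identities above can be checked against the GCC count-leading-zeros
builtin, which agrees with NSAU for any non-zero argument. A small self-contained
example with an arbitrary test value:

    #include <assert.h>

    int main(void)
    {
            unsigned x = 0x58;                          /* bits 3, 4 and 6 set            */

            assert(31 - __builtin_clz(x & -x) == 3);    /* __ffs: lowest set bit, 0-based */
            assert(32 - __builtin_clz(x & -x) == 4);    /* ffs:   same, 1-based           */
            assert(32 - __builtin_clz(x) == 7);         /* fls:   highest set bit, 1-based */
            assert(31 - __builtin_clz(~x & -~x) == 0);  /* ffz:   lowest clear bit        */
            return 0;
    }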
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
new file mode 100644
index 000000000..892aab399
--- /dev/null
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/bootparam.h
+ *
+ * Definition of the Linux/Xtensa boot parameter structure
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * (Concept borrowed from the 68K port)
+ */
+
+#ifndef _XTENSA_BOOTPARAM_H
+#define _XTENSA_BOOTPARAM_H
+
+#define BP_VERSION 0x0001
+
+#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
+#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
+#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
+#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
+
+#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
+#define BP_TAG_LAST 0x7E0B /* last tag */
+
+#ifndef __ASSEMBLY__
+
+/* All records are aligned to 4 bytes */
+
+typedef struct bp_tag {
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure */
+ unsigned long data[0]; /* data */
+} bp_tag_t;
+
+struct bp_meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+};
+
+#define MEMORY_TYPE_CONVENTIONAL 0x1000
+#define MEMORY_TYPE_NONE 0x2000
+
+#endif
+#endif
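A minimal sketch of how this tag list is walked; the helper name is hypothetical
(the real consumer is parse_bootparam() in arch/xtensa/kernel/setup.c) and it relies
on the bp_tag_t definitions above. Records are 4-byte aligned and `size` counts only
the payload, so the next record starts right after data[]:

    static void walk_bootparams(const bp_tag_t *tag)
    {
            if (tag->id != BP_TAG_FIRST)
                    return;                         /* no valid parameter list here      */

            while (tag->id != BP_TAG_LAST) {
                    /* dispatch on tag->id; the payload is tag->data[0..]                */
                    tag = (const bp_tag_t *)((const char *)(tag + 1) + tag->size);
            }
    }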
diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h
new file mode 100644
index 000000000..69b29d198
--- /dev/null
+++ b/arch/xtensa/include/asm/bugs.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/bugs.h
+ *
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Xtensa processors don't have any bugs. :)
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_BUGS_H
+#define _XTENSA_BUGS_H
+
+static void check_bugs(void) { }
+
+#endif /* _XTENSA_BUGS_H */
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
new file mode 100644
index 000000000..d2fd932fd
--- /dev/null
+++ b/arch/xtensa/include/asm/cache.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <variant/core.h>
+
+#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
+#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
+#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif /* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
new file mode 100644
index 000000000..34545ecfd
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -0,0 +1,214 @@
+/*
+ * include/asm-xtensa/cacheasm.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+#include <asm/cache.h>
+#include <asm/asmmacro.h>
+#include <linux/stringify.h>
+
+/*
+ * Define cache functions as macros here so that they can be used
+ * by the kernel and boot loader. We should consider moving them to a
+ * library that can be linked by both.
+ *
+ * Locking
+ *
+ * ___unlock_dcache_all
+ * ___unlock_icache_all
+ *
+ * Flushing and invalidating
+ *
+ * ___flush_invalidate_dcache_{all|range|page}
+ * ___flush_dcache_{all|range|page}
+ * ___invalidate_dcache_{all|range|page}
+ * ___invalidate_icache_{all|range|page}
+ *
+ */
+
+
+ .macro __loop_cache_unroll ar at insn size line_width max_immed
+
+ .if (1 << (\line_width)) > (\max_immed)
+ .set _reps, 1
+ .elseif (2 << (\line_width)) > (\max_immed)
+ .set _reps, 2
+ .else
+ .set _reps, 4
+ .endif
+
+ __loopi \ar, \at, \size, (_reps << (\line_width))
+ .set _index, 0
+ .rep _reps
+ \insn \ar, _index << (\line_width)
+ .set _index, _index + 1
+ .endr
+ __endla \ar, \at, _reps << (\line_width)
+
+ .endm
+
+
+ .macro __loop_cache_all ar at insn size line_width max_immed
+
+ movi \ar, 0
+ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+
+ .macro __loop_cache_range ar as at insn line_width
+
+ extui \at, \ar, 0, \line_width
+ add \as, \as, \at
+
+ __loops \ar, \as, \at, \line_width
+ \insn \ar, 0
+ __endla \ar, \at, (1 << (\line_width))
+
+ .endm
+
+
+ .macro __loop_cache_page ar at insn line_width max_immed
+
+ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+
+ .macro ___unlock_dcache_all ar at
+
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___unlock_icache_all ar at
+
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_all ar at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_range ar as at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_page ar as
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
new file mode 100644
index 000000000..a0d50be5a
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -0,0 +1,182 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHEFLUSH_H
+#define _XTENSA_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Low-level routines for cache flushing.
+ *
+ * invalidate data or instruction cache:
+ *
+ * __invalidate_icache_all()
+ * __invalidate_icache_page(adr)
+ * __invalidate_dcache_page(adr)
+ * __invalidate_icache_range(from,size)
+ * __invalidate_dcache_range(from,size)
+ *
+ * flush data cache:
+ *
+ * __flush_dcache_page(adr)
+ *
+ * flush and invalidate data cache:
+ *
+ * __flush_invalidate_dcache_all()
+ * __flush_invalidate_dcache_page(adr)
+ * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
+ */
+
+extern void __invalidate_dcache_all(void);
+extern void __invalidate_icache_all(void);
+extern void __invalidate_dcache_page(unsigned long);
+extern void __invalidate_icache_page(unsigned long);
+extern void __invalidate_icache_range(unsigned long, unsigned long);
+extern void __invalidate_dcache_range(unsigned long, unsigned long);
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
+extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
+extern void __flush_invalidate_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
+#else
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
+#endif
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __invalidate_icache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+
+/*
+ * We have physically tagged caches - nothing to do here -
+ * unless we have cache aliasing.
+ *
+ * Pages can get remapped. Because this might change the 'color' of that page,
+ * we have to flush the cache before the PTE is changed.
+ * (see also Documentation/core-api/cachetlb.rst)
+ */
+
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
+
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
+
+#define local_flush_cache_all() \
+ do { \
+ __flush_invalidate_dcache_all(); \
+ __invalidate_icache_all(); \
+ } while (0)
+
+#define flush_cache_mm(mm) flush_cache_all()
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vunmap(start,end) flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page*);
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
+
+#else
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+
+#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+
+#endif
+
+/* Ensure consistency between data and instruction cache. */
+#define local_flush_icache_range(start, end) \
+ do { \
+ __flush_dcache_range(start, (end) - (start)); \
+ __invalidate_icache_range(start,(end) - (start)); \
+ } while (0)
+
+/* This is not required, see Documentation/core-api/cachetlb.rst */
+#define flush_icache_page(vma,page) do { } while (0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ __flush_dcache_range((unsigned long) dst, len); \
+ __invalidate_icache_range((unsigned long) dst, len); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif
+
+#endif /* _XTENSA_CACHEFLUSH_H */
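The aliasing condition used throughout this header, DCACHE_WAY_SIZE > PAGE_SIZE, is
easy to see with assumed numbers: once a single cache way spans more than one page,
a physical page can live at several cache "colors" and the alias-flush helpers above
become necessary. The configuration values here are illustrative, not a particular
Xtensa core:

    #include <assert.h>

    int main(void)
    {
            unsigned dcache_size = 16384, dcache_ways = 2;  /* assumed configuration  */
            unsigned page_size = 4096;
            unsigned way_size = dcache_size / dcache_ways;  /* DCACHE_WAY_SIZE        */

            assert(way_size == 8192);                       /* larger than a page ... */
            assert(way_size / page_size == 2);              /* ... so two colors alias */
            return 0;
    }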
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
new file mode 100644
index 000000000..3ae74d7e0
--- /dev/null
+++ b/arch/xtensa/include/asm/checksum.h
@@ -0,0 +1,254 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+#include <variant/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the access_ok().
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+}
+
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ return csum_partial_copy_generic((__force const void *)src, dst,
+ len, sum, err_ptr, NULL);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static __inline__ __sum16 csum_fold(__wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("extui %1, %0, 16, 16\n\t"
+ "extui %0 ,%0, 0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "slli %1, %0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "extui %0, %0, 16, 16\n\t"
+ "neg %0, %0\n\t"
+ "addi %0, %0, -1\n\t"
+ "extui %0, %0, 0, 16\n\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "0" (sum));
+ return (__force __sum16)sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum, tmp, endaddr;
+
+ __asm__ __volatile__(
+ "sub %0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+ "loopgtz %2, 2f\n\t"
+#else
+ "beqz %2, 2f\n\t"
+ "slli %4, %2, 2\n\t"
+ "add %4, %4, %1\n\t"
+ "0:\t"
+#endif
+ "l32i %3, %1, 0\n\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "addi %1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+ "blt %1, %4, 0b\n\t"
+#endif
+ "2:\t"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+
+#ifdef __XTENSA_EL__
+ unsigned long len_proto = (len + proto) << 8;
+#elif defined(__XTENSA_EB__)
+ unsigned long len_proto = len + proto;
+#else
+# error processor byte order undefined!
+#endif
+ __asm__("add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %2\n\t"
+ "bgeu %0, %2, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=r" (len_proto)
+ : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("l32i %1, %2, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %4\n\t"
+ "bgeu %0, %4, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %5\n\t"
+ "bgeu %0, %5, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len,
+ __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_WRITE, dst, len))
+ return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return (__force __wsum)-1; /* invalid checksum */
+}
+#endif
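csum_fold() above is the assembly form of the standard Internet-checksum fold. A
plain-C sketch of the same semantics (the helper name is illustrative):

    #include <stdint.h>

    static uint16_t csum_fold_sketch(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);  /* fold the high half into the low half  */
            sum = (sum & 0xffff) + (sum >> 16);  /* absorb the carry produced by the fold */
            return (uint16_t)~sum;               /* one's complement of the folded sum    */
    }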
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644
index 000000000..201e9009e
--- /dev/null
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -0,0 +1,162 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+#if XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %2, scompare1\n"
+ " s32c1i %0, %1, 0\n"
+ : "+a" (new)
+ : "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " bne %0, %2, 1f\n"
+ " s32i %3, %1, 0\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4: return __cmpxchg_u32(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
+ return old;
+ }
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof (*(ptr))); \
+ })
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " mov %0, %3\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " s32i %2, %1, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
+}
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return xchg_u32(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
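A typical caller pattern for the cmpxchg() defined above, shown as a hypothetical
helper and assuming kernel context for READ_ONCE() from <linux/compiler.h>: recompute
the new value from the old one and retry whenever another CPU updated the word in
between.

    static int saturating_inc(int *counter, int max)
    {
            int old, seen;

            do {
                    old = READ_ONCE(*counter);
                    if (old >= max)
                            return old;             /* already saturated, nothing to do */
                    seen = cmpxchg(counter, old, old + 1);
            } while (seen != old);                  /* lost the race, reload and retry  */

            return old + 1;
    }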
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
new file mode 100644
index 000000000..677501b32
--- /dev/null
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -0,0 +1,177 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <linux/stringify.h>
+#include <variant/core.h>
+#include <variant/tie.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <variant/tie-asm.h>
+
+.macro xchal_sa_start a b
+ .set .Lxchal_pofs_, 0
+ .set .Lxchal_ofs_, 0
+.endm
+
+.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+ .set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_CC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_NOCC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
+ */
+
+#define XTENSA_HAVE_COPROCESSOR(x) \
+ ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS \
+ (XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x) \
+ (XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS \
+ XCHAL_CP_PORT_MASK
+
+#ifndef __ASSEMBLY__
+
+
+#if XCHAL_HAVE_CP
+
+#define RSR_CPENABLE(x) do { \
+ __asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
+ } while(0);
+#define WSR_CPENABLE(x) do { \
+ __asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
+ } while(0);
+
+#endif /* XCHAL_HAVE_CP */
+
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ * ext: extra registers that are used by the compiler
+ * cpn: optional registers that can be used by a user application
+ * cpX: coprocessor registers that can only be used if the corresponding
+ * CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
+ __REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name) __REG0_1 ## abi (name)
+#define __REG0_10(name) __u32 name;
+#define __REG0_11(name) __u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name) __u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a) __u32 name;
+#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+ __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+ __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+ __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+ __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+ __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+ __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+ __attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+ __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
+extern void coprocessor_save(void*, int);
+extern void coprocessor_load(void*, int);
+extern void coprocessor_flush(struct thread_info*, int);
+extern void coprocessor_restore(struct thread_info*, int);
+
+extern void coprocessor_release_all(struct thread_info*);
+extern void coprocessor_flush_all(struct thread_info*);
+
+static inline void coprocessor_clear_cpenable(void)
+{
+ unsigned long i = 0;
+ WSR_CPENABLE(i);
+}
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _XTENSA_COPROCESSOR_H */
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
new file mode 100644
index 000000000..5d98a7ad4
--- /dev/null
+++ b/arch/xtensa/include/asm/current.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-xtensa/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CURRENT_H
+#define _XTENSA_CURRENT_H
+
+#include <asm/thread_info.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#else
+
+#define GET_CURRENT(reg,sp) \
+ GET_THREAD_INFO(reg,sp); \
+ l32i reg, reg, TI_TASK \
+
+#endif
+
+
+#endif /* _XTENSA_CURRENT_H */
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
new file mode 100644
index 000000000..24304b39a
--- /dev/null
+++ b/arch/xtensa/include/asm/delay.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-xtensa/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ */
+
+#ifndef _XTENSA_DELAY_H
+#define _XTENSA_DELAY_H
+
+#include <asm/timex.h>
+#include <asm/param.h>
+
+extern unsigned long loops_per_jiffy;
+
+static inline void __delay(unsigned long loops)
+{
+ if (__builtin_constant_p(loops) && loops < 2)
+ __asm__ __volatile__ ("nop");
+ else if (loops >= 2)
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "+r" (loops));
+}
+
+/* Undefined function to get compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);
+
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long start = get_ccount();
+ unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)get_ccount()) - start < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+ __bad_udelay();
+ else
+ __udelay(usec);
+}
+
+static inline void __ndelay(unsigned long nsec)
+{
+ /*
+ * Inner shift makes sure multiplication doesn't overflow
+ * for legitimate nsec values
+ */
+ unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+ __delay(cycles);
+}
+
+#define ndelay(n) ndelay(n)
+
+static inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+ __bad_ndelay();
+ else
+ __ndelay(nsec);
+}
+
+#endif
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
new file mode 100644
index 000000000..bb099a373
--- /dev/null
+++ b/arch/xtensa/include/asm/dma.h
@@ -0,0 +1,62 @@
+/*
+ * include/asm-xtensa/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_H
+#define _XTENSA_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+/*
+ * This is only to be defined if we have PC-like DMA.
+ * By default this is not true on an Xtensa processor,
+ * however on boards with a PCI bus, such functionality
+ * might be emulated externally.
+ *
+ * NOTE: there still exists driver code that assumes
+ * this is defined, eg. drivers/sound/soundcard.c (as of 2.4).
+ */
+#define MAX_DMA_CHANNELS 8
+
+/*
+ * The maximum virtual address to which DMA transfers
+ * can be performed on this platform.
+ *
+ * NOTE: This is board (platform) specific, not processor-specific!
+ *
+ * NOTE: This assumes DMA transfers can only be performed on
+ * the section of physical memory contiguously mapped in virtual
+ * space for the kernel. For the Xtensa architecture, this
+ * means the maximum possible size of this DMA area is
+ * the size of the statically mapped kernel segment
+ * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB.
+ *
+ * NOTE: When the entire KSEG area is DMA capable, we subtract
+ * one from the max address so that the virt_to_phys() macro
+ * works correctly on the address (otherwise the address
+ * enters another area, and virt_to_phys() may not return
+ * the value desired).
+ */
+
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
+#endif
+
+/* Reserve and release a DMA channel */
+extern int request_dma(unsigned int dmanr, const char * device_id);
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+
+#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
new file mode 100644
index 000000000..eacb25a41
--- /dev/null
+++ b/arch/xtensa/include/asm/elf.h
@@ -0,0 +1,207 @@
+/*
+ * include/asm-xtensa/elf.h
+ *
+ * ELF register definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ELF_H
+#define _XTENSA_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/coprocessor.h>
+
+/* Xtensa processor ELF architecture-magic number */
+
+#define EM_XTENSA 94
+#define EM_XTENSA_OLD 0xABC7
+
+/* Xtensa relocations defined by the ABIs */
+
+#define R_XTENSA_NONE 0
+#define R_XTENSA_32 1
+#define R_XTENSA_RTLD 2
+#define R_XTENSA_GLOB_DAT 3
+#define R_XTENSA_JMP_SLOT 4
+#define R_XTENSA_RELATIVE 5
+#define R_XTENSA_PLT 6
+#define R_XTENSA_OP0 8
+#define R_XTENSA_OP1 9
+#define R_XTENSA_OP2 10
+#define R_XTENSA_ASM_EXPAND 11
+#define R_XTENSA_ASM_SIMPLIFY 12
+#define R_XTENSA_GNU_VTINHERIT 15
+#define R_XTENSA_GNU_VTENTRY 16
+#define R_XTENSA_DIFF8 17
+#define R_XTENSA_DIFF16 18
+#define R_XTENSA_DIFF32 19
+#define R_XTENSA_SLOT0_OP 20
+#define R_XTENSA_SLOT1_OP 21
+#define R_XTENSA_SLOT2_OP 22
+#define R_XTENSA_SLOT3_OP 23
+#define R_XTENSA_SLOT4_OP 24
+#define R_XTENSA_SLOT5_OP 25
+#define R_XTENSA_SLOT6_OP 26
+#define R_XTENSA_SLOT7_OP 27
+#define R_XTENSA_SLOT8_OP 28
+#define R_XTENSA_SLOT9_OP 29
+#define R_XTENSA_SLOT10_OP 30
+#define R_XTENSA_SLOT11_OP 31
+#define R_XTENSA_SLOT12_OP 32
+#define R_XTENSA_SLOT13_OP 33
+#define R_XTENSA_SLOT14_OP 34
+#define R_XTENSA_SLOT0_ALT 35
+#define R_XTENSA_SLOT1_ALT 36
+#define R_XTENSA_SLOT2_ALT 37
+#define R_XTENSA_SLOT3_ALT 38
+#define R_XTENSA_SLOT4_ALT 39
+#define R_XTENSA_SLOT5_ALT 40
+#define R_XTENSA_SLOT6_ALT 41
+#define R_XTENSA_SLOT7_ALT 42
+#define R_XTENSA_SLOT8_ALT 43
+#define R_XTENSA_SLOT9_ALT 44
+#define R_XTENSA_SLOT10_ALT 45
+#define R_XTENSA_SLOT11_ALT 46
+#define R_XTENSA_SLOT12_ALT 47
+#define R_XTENSA_SLOT13_ALT 48
+#define R_XTENSA_SLOT14_ALT 49
+
+/* ELF register definitions. This is needed for core dump support. */
+
+typedef unsigned long elf_greg_t;
+
+typedef struct {
+ elf_greg_t pc;
+ elf_greg_t ps;
+ elf_greg_t lbeg;
+ elf_greg_t lend;
+ elf_greg_t lcount;
+ elf_greg_t sar;
+ elf_greg_t windowstart;
+ elf_greg_t windowbase;
+ elf_greg_t threadptr;
+ elf_greg_t reserved[7+48];
+ elf_greg_t a[64];
+} xtensa_gregset_t;
+
+#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 18
+
+typedef unsigned int elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
+ xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
+
+extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
+ ( (x)->e_machine == EM_XTENSA_OLD ) )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+
+#ifdef __XTENSA_EL__
+# define ELF_DATA ELFDATA2LSB
+#elif defined(__XTENSA_EB__)
+# define ELF_DATA ELFDATA2MSB
+#else
+# error processor byte order undefined!
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ * For the moment, we have only optimizations for the Intel generations,
+ * but that could change...
+ */
+
+#define ELF_PLATFORM (NULL)
+
+/*
+ * The Xtensa processor ABI says that when the program starts, a2
+ * contains a pointer to a function which might be registered using
+ * `atexit'. This provides a means for the dynamic linker to call
+ * DT_FINI functions for shared libraries that have been loaded before
+ * the code runs.
+ *
+ * A value of 0 means there is no such handler.
+ *
+ * We might as well make sure everything else is cleared too (except
+ * for the stack pointer in a1), just to make things more
+ * deterministic. Also, clearing a0 terminates debugger backtraces.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
+
+typedef struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+#endif
+} elf_xtregs_t;
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
+
+struct task_struct;
+
+extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+
+#endif /* _XTENSA_ELF_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644
index 000000000..7e25c1b50
--- /dev/null
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -0,0 +1,85 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the start of the consistent memory region upwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx, phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN +
+ (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /* Check if this memory layout is broken because fixmap overlaps page
+ * table.
+ */
+ BUILD_BUG_ON(FIXADDR_START <
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel( \
+ pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+ (vaddr) \
+ )
+
+#endif
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
new file mode 100644
index 000000000..b8532d787
--- /dev/null
+++ b/arch/xtensa/include/asm/flat.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_XTENSA_FLAT_H
+#define __ASM_XTENSA_FLAT_H
+
+#include <asm/unaligned.h>
+
+#define flat_argvp_envp_on_stack() 0
+#define flat_old_ram_flag(flags) (flags)
+#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
+#define flat_get_relocate_addr(rel) (rel)
+#define flat_set_persistent(relval, p) 0
+
+#endif /* __ASM_XTENSA_FLAT_H */
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644
index 000000000..6c6d9a9f1
--- /dev/null
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -0,0 +1,40 @@
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
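+/*
+ * ftrace_return_address0 reconstructs the caller's PC: a0 holds the
+ * windowed return address (window increment in its top two bits) and a1
+ * the stack pointer; MAKE_PC_FROM_RA merges the low 30 bits of a0 with
+ * the high bits of a1 to form a real code address.
+ */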
+#define ftrace_return_address0 ({ unsigned long a0, a1; \
+ __asm__ __volatile__ ( \
+ "mov %0, a0\n" \
+ "mov %1, a1\n" \
+ : "=r"(a0), "=r"(a1)); \
+ MAKE_PC_FROM_RA(a0, a1); })
+
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define ftrace_return_address(n) return_address(n)
+#endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 3
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
new file mode 100644
index 000000000..5bfbc1c40
--- /dev/null
+++ b/arch/xtensa/include/asm/futex.h
@@ -0,0 +1,127 @@
+/*
+ * Atomic futex routines
+ *
+ * Based on the PowerPC implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 TangoTec Ltd.
+ *
+ * Baruch Siach <baruch@tkos.co.il>
+ */
+
+#ifndef _ASM_XTENSA_FUTEX_H
+#define _ASM_XTENSA_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
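+/*
+ * __futex_atomic_op(): load the old value of *uaddr into %0, let `insn'
+ * compute the new value into %1 from the old value (%0) and oparg (%4),
+ * then attempt the store with s32c1i against scompare1.  If another CPU
+ * modified the word in the meantime the sequence retries from the load;
+ * on success ret (%1) becomes 0, and a faulting access is redirected
+ * through the fixup section and returns -EFAULT.
+ */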
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile( \
+ "1: l32i %0, %2, 0\n" \
+ insn "\n" \
+ " wsr %0, scompare1\n" \
+ "2: s32c1i %1, %2, 0\n" \
+ " bne %1, %0, 1b\n" \
+ " movi %1, 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: .long 3b\n" \
+ "5: l32r %0, 4b\n" \
+ " movi %1, %3\n" \
+ " jx %0\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b,5b,2b,5b\n" \
+ " .previous\n" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
+ : "memory")
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ __asm__ __volatile__ (
+ " # futex_atomic_cmpxchg_inatomic\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 4\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
+ " jx %1\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b,4b\n"
+ " .previous\n"
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
+ : "memory");
+
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_XTENSA_FUTEX_H */
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
new file mode 100644
index 000000000..04e9340ea
--- /dev/null
+++ b/arch/xtensa/include/asm/highmem.h
@@ -0,0 +1,100 @@
+/*
+ * include/asm-xtensa/highmem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_HIGHMEM_H
+#define _XTENSA_HIGHMEM_H
+
+#include <linux/wait.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE ((FIXADDR_START - \
+ (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
+#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL_EXEC
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+#define get_pkmap_color get_pkmap_color
+static inline int get_pkmap_color(struct page *page)
+{
+ return DCACHE_ALIAS(page_to_phys(page));
+}
+
+extern unsigned int last_pkmap_nr_arr[];
+
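+/*
+ * Each cache color keeps its own rotating counter in last_pkmap_nr_arr,
+ * advanced in steps of DCACHE_N_COLORS, so the pkmap entries handed out
+ * for a color all have indices congruent to that color.  The resulting
+ * virtual address then has the same cache color as the page's physical
+ * address, avoiding dcache aliasing for highmem mappings.
+ */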
+static inline unsigned int get_next_pkmap_nr(unsigned int color)
+{
+ last_pkmap_nr_arr[color] =
+ (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
+ return last_pkmap_nr_arr[color] + color;
+}
+
+static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
+{
+ return pkmap_nr < DCACHE_N_COLORS;
+}
+
+static inline int get_pkmap_entries_count(unsigned int color)
+{
+ return LAST_PKMAP / DCACHE_N_COLORS;
+}
+
+extern wait_queue_head_t pkmap_map_wait_arr[];
+
+static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+{
+ return pkmap_map_wait_arr + color;
+}
+#endif
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE <
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
+
+#endif
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..9f119c1ca
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -0,0 +1,61 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef __ASM_XTENSA_HW_BREAKPOINT_H
+#define __ASM_XTENSA_HW_BREAKPOINT_H
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <uapi/linux/hw_breakpoint.h>
+
+/* Breakpoint */
+#define XTENSA_BREAKPOINT_EXECUTE 0
+
+/* Watchpoints */
+#define XTENSA_BREAKPOINT_LOAD 1
+#define XTENSA_BREAKPOINT_STORE 2
+
+struct arch_hw_breakpoint {
+ unsigned long address;
+ u16 len;
+ u16 type;
+};
+
+struct perf_event_attr;
+struct perf_event;
+struct pt_regs;
+struct task_struct;
+
+int hw_breakpoint_slots(int type);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int check_hw_breakpoint(struct pt_regs *regs);
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+#else
+
+struct task_struct;
+
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __ASM_XTENSA_HW_BREAKPOINT_H */
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
new file mode 100644
index 000000000..3ddbea759
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_irq.h
@@ -0,0 +1,14 @@
+/*
+ * include/asm-xtensa/hw_irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_HW_IRQ_H
+#define _XTENSA_HW_IRQ_H
+
+#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000..10e9852b2
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,216 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMUs.
+ *
+ * With the new MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+ movi a3, 0x29 /* non-MX -- Most cores use Std Memory
+ * Controllers which usually can't use RCW
+ */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
+
+ /* Step 1: invalidate the spanning-way mapping at TEMP_MAPPING_VADDR. */
+
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 128 MB starting at TEMP_MAPPING_VADDR to the paddr
+ * containing this code and jump to the new mapping.
+ */
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -XCHAL_SPANNING_WAY
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the requested static mappings. */
+
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using final mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
+ .endm
+
+ .macro initialize_cacheattr
+
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
+#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
+#endif
+
+ movi a5, XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a8, 0x20000000
+1:
+ rdtlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ wdtlb a3, a5
+ ritlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ witlb a3, a5
+
+ add a5, a5, a8
+ srli a4, a4, 4
+ bgeu a5, a8, 1b
+
+ isync
+#endif
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
new file mode 100644
index 000000000..acc5bb2cf
--- /dev/null
+++ b/arch/xtensa/include/asm/io.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-xtensa/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IO_H
+#define _XTENSA_IO_H
+
+#ifdef __KERNEL__
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/vectors.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+
+#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
+
+#ifdef CONFIG_MMU
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
+/*
+ * Return the virtual address for the specified bus memory.
+ */
+static inline void __iomem *ioremap_nocache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
+ else
+ return xtensa_ioremap_nocache(offset, size);
+}
+
+static inline void __iomem *ioremap_cache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+ return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
+ else
+ return xtensa_ioremap_cache(offset, size);
+}
+#define ioremap_cache ioremap_cache
+#define ioremap_nocache ioremap_nocache
+
+#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ return ioremap_nocache(offset, size);
+}
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+ unsigned long va = (unsigned long) addr;
+
+ if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+ va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+ !(va >= XCHAL_KIO_BYPASS_VADDR &&
+ va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+ xtensa_iounmap(addr);
+}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+#endif /* CONFIG_MMU */
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/io.h>
+
+#endif /* _XTENSA_IO_H */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
new file mode 100644
index 000000000..6c6ed23e0
--- /dev/null
+++ b/arch/xtensa/include/asm/irq.h
@@ -0,0 +1,42 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <linux/init.h>
+#include <variant/core.h>
+
+#ifdef CONFIG_PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
+#else
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return (irq);
+}
+
+struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);
+
+#endif /* _XTENSA_IRQ_H */
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
new file mode 100644
index 000000000..407606e57
--- /dev/null
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -0,0 +1,82 @@
+/*
+ * Xtensa IRQ flags handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_IRQFLAGS_H
+#define _XTENSA_IRQFLAGS_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("rsr %0, ps" : "=a" (flags));
+ return flags;
+}
+
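+/*
+ * All variants below return the previous PS value in flags and mask
+ * interrupts up to LOCKLEVEL.  With fake NMIs the current interrupt
+ * level must not be lowered here: the debug build checks the level and
+ * only issues rsil when it is below LOCKLEVEL, while the other variant
+ * ORs the level bits into PS, which can only raise the field.
+ */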
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+ : "=a" (flags) :: "memory");
+#endif
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+ asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("wsr %0, ps; rsync"
+ :: "a" (flags) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _XTENSA_IRQFLAGS_H */
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000..216b6f32c
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
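+
+/*
+ * With these parameters the generic mem-to-shadow mapping is
+ * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
+ * i.e. one shadow byte tracks eight bytes of kernel address space
+ * starting at KASAN_START_VADDR.
+ */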
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000..9c12babc0
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,75 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#endif
+
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
+#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000..0ba997323
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
new file mode 100644
index 000000000..71afe418d
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_H
+#define _XTENSA_MMU_H
+
+#ifndef CONFIG_MMU
+#include <asm-generic/mmu.h>
+#else
+
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ unsigned int cpu;
+} mm_context_t;
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
new file mode 100644
index 000000000..de5e6cbba
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -0,0 +1,159 @@
+/*
+ * Switch an MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_CONTEXT_H
+#define _XTENSA_MMU_CONTEXT_H
+
+#ifndef CONFIG_MMU
+#include <asm/nommu_context.h>
+#else
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+
+#include <asm/vectors.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
+
+#if (XCHAL_HAVE_TLBS != 1)
+# error "Linux must have an MMU!"
+#endif
+
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * NO_CONTEXT is the invalid ASID value that we don't ever assign to
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
+ *
+ * 0 invalid
+ * 1 kernel
+ * 2 reserved
+ * 3 reserved
+ * 4...255 available
+ */
+
+#define NO_CONTEXT 0
+#define ASID_USER_FIRST 4
+#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
+#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
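+
+/*
+ * Example: ASID_INSERT(5) evaluates to 0x03020501, i.e. the RASID
+ * register holds the kernel ASID 1 for ring 0, the user ASID 5 for
+ * ring 1, and the reserved values 2 and 3 for rings 2 and 3.
+ */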
+
+void init_mmu(void);
+void init_kio(void);
+
+static inline void set_rasid_register (unsigned long val)
+{
+ __asm__ __volatile__ (" wsr %0, rasid\n\t"
+ " isync\n" : : "a" (val));
+}
+
+static inline unsigned long get_rasid_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long asid = cpu_asid_cache(cpu);
+ if ((++asid & ASID_MASK) == 0) {
+ /*
+ * Start new asid cycle; continue counting with next
+ * incarnation bits; skipping over 0, 1, 2, 3.
+ */
+ local_flush_tlb_all();
+ asid += ASID_USER_FIRST;
+ }
+ cpu_asid_cache(cpu) = asid;
+ mm->context.asid[cpu] = asid;
+ mm->context.cpu = cpu;
+}
+
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ /*
+ * Check if our ASID is of an older version and thus invalid.
+ */
+
+ if (mm) {
+ unsigned long asid = mm->context.asid[cpu];
+
+ if (asid == NO_CONTEXT ||
+ ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+ get_new_mmu_context(mm, cpu);
+ }
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+ get_mmu_context(mm, cpu);
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ invalidate_page_directory();
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
+ */
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ }
+ mm->context.cpu = -1;
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ int migrated = next->context.cpu != cpu;
+ /* Flush the icache if we migrated to a new core. */
+ if (migrated) {
+ __invalidate_icache_all();
+ next->context.cpu = cpu;
+ }
+ if (migrated || prev != next)
+ activate_context(next, cpu);
+}
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ invalidate_page_directory();
+}
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ /* Nothing to do. */
+
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_CONTEXT_H */
diff --git a/arch/xtensa/include/asm/module.h b/arch/xtensa/include/asm/module.h
new file mode 100644
index 000000000..488b40c6f
--- /dev/null
+++ b/arch/xtensa/include/asm/module.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/module.h
+ *
+ * This file contains the module code specific to the Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MODULE_H
+#define _XTENSA_MODULE_H
+
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
+#include <asm-generic/module.h>
+
+#endif /* _XTENSA_MODULE_H */
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h
new file mode 100644
index 000000000..73dcc5456
--- /dev/null
+++ b/arch/xtensa/include/asm/mxregs.h
@@ -0,0 +1,46 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MXREGS_H
+#define _XTENSA_MXREGS_H
+
+/*
+ * RER/WER at, as Read/write external register
+ * at: value
+ * as: address
+ *
+ * Address Value
+ * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
+ * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
+ * 0180 0...0m..m Clear enable specified by mask (m)
+ * 0184 0...0m..m Set enable specified by mask (m)
+ * 0190 0...0x..x 8-bit IPI partition register
+ * VVVVVVVVPPPPUUUUUUUUUUUUUUUUU
+ * V (10-bit) Release/Version
+ * P ( 4-bit) Number of cores - 1
+ * U (18-bit) ID
+ * 01a0 i.......i 32-bit ConfigID
+ * 0200 0...0m..m RunStall core 'n'
+ * 0220 c Cache coherency enabled
+ */
+
+#define MIROUT(irq) (0x000 + (irq))
+#define MIPICAUSE(cpu) (0x100 + (cpu))
+#define MIPISET(cause) (0x140 + (cause))
+#define MIENG 0x180
+#define MIENGSET 0x184
+#define MIASG 0x188 /* Read Global Assert Register */
+#define MIASGSET 0x18c /* Set Global Assert Register */
+#define MIPIPART 0x190
+#define SYSCFGID 0x1a0
+#define MPSCORE 0x200
+#define CCON 0x220
+
+#endif /* _XTENSA_MXREGS_H */
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
new file mode 100644
index 000000000..37251b2ef
--- /dev/null
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+static inline void init_mmu(void)
+{
+}
+
+static inline void init_kio(void)
+{
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
+}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
new file mode 100644
index 000000000..09c56cba4
--- /dev/null
+++ b/arch/xtensa/include/asm/page.h
@@ -0,0 +1,199 @@
+/*
+ * include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/kmem_layout.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
+#else
+#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
+#endif
+
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * | |cache| cache index
+ * | pfn |off| virtual address
+ * |xxxx:X|zzz|
+ * | : | |
+ * | \ / | |
+ * |trans.| |
+ * | / \ | |
+ * |yyyy:Y|zzz| physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER 0
+# define DCACHE_ALIAS(a) ((void)(a), 0)
+#endif
+#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
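+
+/*
+ * Example: with a 16 KiB cache way and 4 KiB pages, DCACHE_ALIAS_ORDER
+ * is 2 and DCACHE_N_COLORS is 4; two mappings of the same physical page
+ * use the same cache lines only when bits 13:12 of their virtual
+ * addresses (the page color) agree.
+ */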
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER 0
+#endif
+
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ * Use the 'nsau' instruction if supported by the processor, or the generic version otherwise.
+ */
+
+#if XCHAL_HAVE_NSA
+
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
+ return 32 - lz;
+}
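+
+/*
+ * Example: for size == PAGE_SIZE, (size - 1) >> PAGE_SHIFT is 0, nsau
+ * yields 32 and the order is 0; for size == PAGE_SIZE + 1 the argument
+ * becomes 1, nsau yields 31 and the order is 1, matching the generic
+ * get_order().
+ */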
+
+#else
+
+# include <asm-generic/getorder.h>
+
+#endif
+
+struct page;
+struct vm_area_struct;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_page_alias(void *vaddr, unsigned long paddr);
+extern void copy_page_alias(void *to, void *from,
+ unsigned long to_paddr, unsigned long from_paddr);
+
+#define clear_user_highpage clear_user_highpage
+void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#else
+# define clear_user_page(page, vaddr, pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel addresses, not user
+ * addresses.
+ */
+
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+ return off + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+#define pfn_valid(pfn) \
+ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#ifdef CONFIG_DISCONTIGMEM
+# error CONFIG_DISCONTIGMEM not supported
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
new file mode 100644
index 000000000..0b68c76ec
--- /dev/null
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -0,0 +1,88 @@
+/*
+ * include/asm-xtensa/pci-bridge.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_BRIDGE_H
+#define _XTENSA_PCI_BRIDGE_H
+
+#ifdef __KERNEL__
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pciauto_bus_scan() enumerates the pci space.
+ */
+
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+struct pci_space {
+ unsigned long start;
+ unsigned long end;
+ unsigned long base;
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+
+struct pci_controller {
+ int index; /* used for pci_controller_num */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct pci_space io_space;
+ struct pci_space mem_space;
+
+ /* Return the interrupt number for a device. */
+ int (*map_irq)(struct pci_dev*, u8, u8);
+
+};
+
+static inline void pcibios_init_resource(struct resource *res,
+ unsigned long start, unsigned long end, int flags, char *name)
+{
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+ res->name = name;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+}
+
+
+/* These are used for config access before all the PCI probing has been done. */
+int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
+int early_read_config_word(struct pci_controller*, int, int, int, u16*);
+int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
+int early_write_config_byte(struct pci_controller*, int, int, int, u8);
+int early_write_config_word(struct pci_controller*, int, int, int, u16);
+int early_write_config_dword(struct pci_controller*, int, int, int, u32);
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
new file mode 100644
index 000000000..883024054
--- /dev/null
+++ b/arch/xtensa/include/asm/pci.h
@@ -0,0 +1,53 @@
+/*
+ * linux/include/asm-xtensa/pci.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_H
+#define _XTENSA_PCI_H
+
+#ifdef __KERNEL__
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader
+ */
+
+#define pcibios_assign_all_busses() 0
+
+/* Assume some values. (We should revise them, if necessary) */
+
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Dynamic DMA mapping stuff.
+ * Xtensa has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* The PCI address space does equal the physical memory address space.
+ * The networking and block device layers use this boolean for bounce buffer
+ * decisions.
+ */
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+
+#endif /* __KERNEL__ */
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h
new file mode 100644
index 000000000..5aa4590ac
--- /dev/null
+++ b/arch/xtensa/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_XTENSA_PERF_EVENT_H
+#define __ASM_XTENSA_PERF_EVENT_H
+
+#endif /* __ASM_XTENSA_PERF_EVENT_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
new file mode 100644
index 000000000..1065bc8bc
--- /dev/null
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-xtensa/pgalloc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef __KERNEL__
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+
+/*
+ * Allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pmd_populate_kernel(mm, pmdp, ptep) \
+ (pmd_val(*(pmdp)) = ((unsigned long)ptep))
+#define pmd_populate(mm, pmdp, page) \
+ (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *ptep;
+ int i;
+
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL);
+ if (!ptep)
+ return NULL;
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, ptep + i);
+ return ptep;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long addr)
+{
+ pte_t *pte;
+ struct page *page;
+
+ pte = pte_alloc_one_kernel(mm, addr);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PGALLOC_H */
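
A minimal sketch (not part of the patch; the wrapper name is hypothetical) of how the allocation and population helpers above combine when a new user PTE page is needed, assuming a kernel context with an mm and a pmd entry in hand:

    /* Illustrative wrapper: allocate a zeroed PTE page and hook it into
     * the (folded) pmd entry. */
    static pgtable_t example_install_pte_page(struct mm_struct *mm,
                                              pmd_t *pmdp, unsigned long addr)
    {
            pgtable_t page = pte_alloc_one(mm, addr);  /* zeroed page + ctor */

            if (!page)
                    return NULL;
            pmd_populate(mm, pmdp, page);     /* point the pmd entry at it */
            return page;
    }
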
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
new file mode 100644
index 000000000..29cfe421c
--- /dev/null
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -0,0 +1,454 @@
+/*
+ * include/asm-xtensa/pgtable.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGTABLE_H
+#define _XTENSA_PGTABLE_H
+
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm/page.h>
+#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * We only use two ring levels, user and kernel space.
+ */
+
+#ifdef CONFIG_MMU
+#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
+#define KERNEL_RING 0 /* kernel ring level */
+
+/*
+ * The Xtensa architecture port of Linux has a two-level page table system,
+ * i.e. the logical three-level Linux page table layout is folded.
+ * Each task has the following memory page tables:
+ *
+ * PGD table (page directory), i.e. 3rd-level page table:
+ * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
+ * (Architectures that don't have the PMD folded point to the PMD tables)
+ *
+ * The pointer to the PGD table for a given task can be retrieved from
+ * the task structure (struct task_struct*) t, e.g. current():
+ * (t->mm ? t->mm : t->active_mm)->pgd
+ *
+ * PMD tables (page middle-directory), i.e. 2nd-level page tables:
+ * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
+ *
+ * PTE tables (page table entry), i.e. 1st-level page tables:
+ * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
+ * invalid_pte_table for absent mappings.
+ *
+ * The individual pages are 4 kB in size, with a special page for the empty_zero_page.
+ */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE_SHIFT 10
+#define PTRS_PER_PGD 1024
+#define PGD_ORDER 0
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
+
+#ifdef CONFIG_MMU
+/*
+ * Virtual memory area. We keep some distance from other memory regions to be
+ * on the safe side. We also use this area for cache aliasing.
+ */
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
+#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE ICACHE_WAY_SIZE
+#endif
+
+#else
+
+#define VMALLOC_START __XTENSA_UL_CONST(0)
+#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
+
+#endif
+
+/*
+ * For the Xtensa architecture, the PTE layout is as follows:
+ *
+ * 31------12 11 10-9 8-6 5-4 3-2 1-0
+ * +-----------------------------------------+
+ * | | Software | HARDWARE |
+ * | PPN | ADW | RI |Attribute|
+ * +-----------------------------------------+
+ * pte_none | MBZ | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | wx |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ *
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | w1 |
+ * +-----------------------------------------+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
+ * +-----------------------------------------+
+ *
+ * Legend:
+ * PPN Physical Page Number
+ * ADW software: accessed (young) / dirty / writable
+ * RI ring (0=privileged, 1=user, 2 and 3 are unused)
+ * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
+ * (11 is invalid and used to mark pages that are not present)
+ * w page is writable (hw)
+ * x page is executable (hw)
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+ * any access (read, write, and execute).
+ * - 'multihit-exception' has the highest priority of all MMU exceptions,
+ * so the ring must be set to 'RING_USER' even for 'non-present' pages.
+ * - on older hardware, the executable flag was not supported and
+ * used as a 'valid' flag, so it needs to be always set.
+ * - we need to keep track of certain flags in software (dirty and young)
+ * to do this, we use write exceptions and have a separate software w-flag.
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
+ */
+
+#define _PAGE_ATTRIB_MASK 0xf
+
+#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
+#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
+
+#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
+#define _PAGE_CA_WB (1<<2) /* write-back */
+#define _PAGE_CA_WT (2<<2) /* write-through */
+#define _PAGE_CA_MASK (3<<2)
+#define _PAGE_CA_INVALID (3<<2)
+
+/* We use invalid attribute values to distinguish special pte entries */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
+#define _PAGE_NONE 0x04
+#else
+#define _PAGE_HW_VALID 0x00
+#define _PAGE_NONE 0x0f
+#endif
+
+#define _PAGE_USER (1<<4) /* user access (ring=1) */
+
+/* Software */
+#define _PAGE_WRITABLE_BIT 6
+#define _PAGE_WRITABLE (1<<6) /* software: page writable */
+#define _PAGE_DIRTY (1<<7) /* software: page dirty */
+#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
+#else
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+#endif
+
+#else /* no mmu */
+
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+# define PAGE_NONE __pgprot(0)
+# define PAGE_SHARED __pgprot(0)
+# define PAGE_COPY __pgprot(0)
+# define PAGE_READONLY __pgprot(0)
+# define PAGE_KERNEL __pgprot(0)
+
+#endif
+
+/*
+ * On certain configurations of Xtensa MMUs (e.g. the initial Linux config),
+ * the MMU can't do page protection for execute, and considers that the same as
+ * read. Also, write permissions may imply read permissions.
+ * What follows is the closest we can get by reasonable means.
+ * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
+ */
+#define __P000 PAGE_NONE /* private --- */
+#define __P001 PAGE_READONLY /* private --r */
+#define __P010 PAGE_COPY /* private -w- */
+#define __P011 PAGE_COPY /* private -wr */
+#define __P100 PAGE_READONLY_EXEC /* private x-- */
+#define __P101 PAGE_READONLY_EXEC /* private x-r */
+#define __P110 PAGE_COPY_EXEC /* private xw- */
+#define __P111 PAGE_COPY_EXEC /* private xwr */
+
+#define __S000 PAGE_NONE /* shared --- */
+#define __S001 PAGE_READONLY /* shared --r */
+#define __S010 PAGE_SHARED /* shared -w- */
+#define __S011 PAGE_SHARED /* shared -wr */
+#define __S100 PAGE_READONLY_EXEC /* shared x-- */
+#define __S101 PAGE_READONLY_EXEC /* shared x-r */
+#define __S110 PAGE_SHARED_EXEC /* shared xw- */
+#define __S111 PAGE_SHARED_EXEC /* shared xwr */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern unsigned long empty_zero_page[1024];
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#ifdef CONFIG_MMU
+extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+extern void paging_init(void);
+#else
+# define swapper_pg_dir NULL
+static inline void paging_init(void) { }
+#endif
+static inline void pgtable_cache_init(void) { }
+
+/*
+ * The pmd contains the kernel virtual address of the pte page.
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
+
+/*
+ * pte status.
+ */
+# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
+#else
+# define pte_present(pte) \
+ (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
+ || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
+#endif
+#define pte_clear(mm,addr,ptep) \
+ do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
+
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte)
+ { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+ { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+ { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)
+ { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)
+ { return pte; }
+
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/*
+ * Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void update_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+
+}
+
+struct mm_struct;
+
+static inline void
+set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void
+set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+}
+
+struct vm_area_struct;
+
+static inline int
+ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ update_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
+}
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,address) ((pmd_t*)(dir))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr) \
+ ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
+#define pte_unmap(pte) do { } while (0)
+
+
+/*
+ * Encode and decode a swap and file entry.
+ */
+#define SWP_TYPE_BITS 5
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 11)
+#define __swp_entry(type,offs) \
+ ((swp_entry_t){((type) << 6) | ((offs) << 11) | \
+ _PAGE_CA_INVALID | _PAGE_USER})
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#endif /* !defined (__ASSEMBLY__) */
+
+
+#ifdef __ASSEMBLY__
+
+/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
+ * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
+ * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
+ * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
+ *
+ * Note: We require an additional temporary register which can be the same as
+ * the register that holds the address.
+ *
+ * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
+ *
+ */
+#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
+#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
+
+#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
+ _PGD_INDEX(tmp, adr); \
+ addx4 mm, tmp, mm
+
+#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
+ srli pmd, pmd, PAGE_SHIFT; \
+ slli pmd, pmd, PAGE_SHIFT; \
+ addx4 pmd, tmp, pmd
+
+#else
+
+#define kern_addr_valid(addr) (1)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t *ptep);
+
+typedef pte_t *pte_addr_t;
+
+#endif /* !defined (__ASSEMBLY__) */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _XTENSA_PGTABLE_H */
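
The swap-entry encoding described in the layout comment above can be exercised as in the following sketch (illustrative values; kernel context assumed, the function name is hypothetical):

    static void example_swap_roundtrip(void)
    {
            swp_entry_t e = __swp_entry(3, 0x1234); /* type 3, offset 0x1234 */
            unsigned int type = __swp_type(e);      /* -> 3 */
            unsigned long off = __swp_offset(e);    /* -> 0x1234 */
            pte_t pte = __swp_entry_to_pte(e);      /* carries _PAGE_CA_INVALID | _PAGE_USER */
            swp_entry_t back = __pte_to_swp_entry(pte);

            BUG_ON(back.val != e.val || type != 3 || off != 0x1234);
    }
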
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
new file mode 100644
index 000000000..560483356
--- /dev/null
+++ b/arch/xtensa/include/asm/platform.h
@@ -0,0 +1,105 @@
+/*
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the mmu is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot-loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_restart is called to restart the system.
+ */
+extern void platform_restart (void);
+
+/*
+ * platform_halt is called to stop the system and halt.
+ */
+extern void platform_halt (void);
+
+/*
+ * platform_power_off is called to stop the system and power it off.
+ */
+extern void platform_power_off (void);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_heartbeat is called HZ times per second.
+ */
+extern void platform_heartbeat (void);
+
+/*
+ * platform_pcibios_init is called to allow the platform to setup the pci bus.
+ */
+extern void platform_pcibios_init (void);
+
+/*
+ * platform_pcibios_fixup allows the platform to modify the PCI configuration.
+ */
+extern int platform_pcibios_fixup (void);
+
+/*
+ * platform_calibrate_ccount calibrates the CPU clock frequency (CONFIG_XTENSA_CALIBRATE_CCOUNT).
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following set of functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled. Default implementations do nothing and issue a warning.
+ */
+
+/*
+ * Check whether p points to a cached memory.
+ */
+bool platform_vaddr_cached(const void *p);
+
+/*
+ * Check whether p points to an uncached memory.
+ */
+bool platform_vaddr_uncached(const void *p);
+
+/*
+ * Return a pointer to an uncached view of the cached address p.
+ */
+void *platform_vaddr_to_uncached(void *p);
+
+/*
+ * Return a pointer to a cached view of the uncached address p.
+ */
+void *platform_vaddr_to_cached(void *p);
+
+#endif /* _XTENSA_PLATFORM_H */
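
The platform hooks above are plain functions that a board port provides; as a hedged illustration (the body is made up, real implementations live under arch/xtensa/platforms/), a trivial platform_power_off might look like:

    void platform_power_off(void)
    {
            pr_info("power off requested; no power-control hardware, spinning\n");
            while (1)
                    cpu_relax();
    }
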
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
new file mode 100644
index 000000000..677bc76c1
--- /dev/null
+++ b/arch/xtensa/include/asm/processor.h
@@ -0,0 +1,250 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#include <variant/core.h>
+
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/regs.h>
+
+/* Assertions. */
+
+#if (XCHAL_HAVE_WINDOWED != 1)
+# error Linux requires the Xtensa Windowed Registers Option.
+#endif
+
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#ifdef CONFIG_MMU
+#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
+#else
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
+#endif
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism. To do this, an otherwise
+ * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG 63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
+#else
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
+#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#ifndef __ASSEMBLY__
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1 GB range as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp. reg must be in the range [0..4).
+ */
+#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call8. reg must be in the range [4..8).
+ */
+#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call12. reg must be in the range [4..12).
+ */
+#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+
+ /* kernel's return address and stack pointer for context switching */
+ unsigned long ra; /* kernel's a0: return address and window call size */
+ unsigned long sp; /* kernel's a1: stack pointer */
+
+ mm_segment_t current_ds; /* see uaccess.h for example uses */
+
+ /* struct xtensa_cpuinfo info; */
+
+ unsigned long bad_vaddr; /* last user fault */
+ unsigned long bad_uaddr; /* last kernel fault accessing user space */
+ unsigned long error_code;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
+ struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
+#endif
+ /* Make structure 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+};
+
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define INIT_THREAD \
+{ \
+ ra: 0, \
+ sp: sizeof(init_stack) + (long) &init_stack, \
+ current_ds: {0}, \
+ /*info: {0}, */ \
+ bad_vaddr: 0, \
+ bad_uaddr: 0, \
+ error_code: 0, \
+}
+
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Note: We set up ps as if we did a call4 to the new pc.
+ * set_thread_state in signal.c depends on it.
+ */
+#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+ memset(regs, 0, sizeof(*regs)); \
+ regs->pc = new_pc; \
+ regs->ps = USER_PS_VALUE; \
+ regs->areg[1] = new_sp; \
+ regs->areg[0] = 0; \
+ regs->wmask = 1; \
+ regs->depc = 0; \
+ regs->windowbase = 0; \
+ regs->windowstart = 1;
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
+
+#define cpu_relax() barrier()
+
+/* Special register access. */
+
+#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
+#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
+
+#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
+#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#if XCHAL_HAVE_EXTERN_REGS
+
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+ asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+ register unsigned long value;
+ asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+ return value;
+}
+
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_PROCESSOR_H */
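
The return-address macros above pack the window call increment into the top two bits of a 30-bit PC; MAKE_PC_FROM_RA then restores the upper bits from the stack pointer's 1 GB region. A worked example with illustrative addresses:

    /* call8 (winsize 2) from pc 0x10002000, user sp 0x3ff00000: */
    unsigned long ra = MAKE_RA_FOR_CALL(0x10002000UL, 2); /* == 0x90002000 */
    unsigned long pc = MAKE_PC_FROM_RA(ra, 0x3ff00000UL); /* == 0x10002000 */
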
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
new file mode 100644
index 000000000..3a5c5918a
--- /dev/null
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,110 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+#include <asm/kmem_layout.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * exception :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/coprocessor.h>
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long pc; /* 4 */
+ unsigned long ps; /* 8 */
+ unsigned long depc; /* 12 */
+ unsigned long exccause; /* 16 */
+ unsigned long excvaddr; /* 20 */
+ unsigned long debugcause; /* 24 */
+ unsigned long wmask; /* 28 */
+ unsigned long lbeg; /* 32 */
+ unsigned long lend; /* 36 */
+ unsigned long lcount; /* 40 */
+ unsigned long sar; /* 44 */
+ unsigned long windowbase; /* 48 */
+ unsigned long windowstart; /* 52 */
+ unsigned long syscall; /* 56 */
+ unsigned long icountlevel; /* 60 */
+ unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
+
+ /* Additional configurable registers that are used by the compiler. */
+ xtregs_opt_t xtregs_opt;
+
+ /* Make sure the areg field is 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+
+ /* current register frame.
+ * Note: The ESF for kernel exceptions ends after 16 registers!
+ */
+ unsigned long areg[16];
+};
+
+#include <variant/core.h>
+
+# define arch_has_single_step() (1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+# define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+ (regs)->areg[1]))
+
+# ifndef CONFIG_SMP
+# define profile_pc(regs) instruction_pointer(regs)
+# else
+# define profile_pc(regs) \
+ ({ \
+ in_lock_functions(instruction_pointer(regs)) ? \
+ return_pointer(regs) : instruction_pointer(regs); \
+ })
+# endif
+
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
+#else /* __ASSEMBLY__ */
+
+# include <asm/asm-offsets.h>
+#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _XTENSA_PTRACE_H */
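
As a hedged usage sketch (kernel context assumed; tsk stands for some struct task_struct pointer), the accessors above combine like this when inspecting a stopped task:

    struct pt_regs *regs = task_pt_regs(tsk);

    if (user_mode(regs))
            pr_info("pc=%08lx sp=%08lx\n",
                    instruction_pointer(regs), user_stack_pointer(regs));
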
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
new file mode 100644
index 000000000..477594e58
--- /dev/null
+++ b/arch/xtensa/include/asm/regs.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef _XTENSA_REGS_H
+#define _XTENSA_REGS_H
+
+/* Special registers. */
+
+#define SREG_MR 32
+#define SREG_IBREAKENABLE 96
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244
+
+/* EXCCAUSE register fields */
+
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+
+#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
+#define EXCCAUSE_SYSTEM_CALL 1
+#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
+#define EXCCAUSE_LOAD_STORE_ERROR 3
+#define EXCCAUSE_LEVEL1_INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
+#define EXCCAUSE_SPECULATION 7
+#define EXCCAUSE_PRIVILEGED 8
+#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
+#define EXCCAUSE_ITLB_MISS 16
+#define EXCCAUSE_ITLB_MULTIHIT 17
+#define EXCCAUSE_ITLB_PRIVILEGE 18
+#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
+#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
+#define EXCCAUSE_DTLB_MISS 24
+#define EXCCAUSE_DTLB_MULTIHIT 25
+#define EXCCAUSE_DTLB_PRIVILEGE 26
+#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
+#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
+#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
+#define EXCCAUSE_COPROCESSOR0_DISABLED 32
+#define EXCCAUSE_COPROCESSOR1_DISABLED 33
+#define EXCCAUSE_COPROCESSOR2_DISABLED 34
+#define EXCCAUSE_COPROCESSOR3_DISABLED 35
+#define EXCCAUSE_COPROCESSOR4_DISABLED 36
+#define EXCCAUSE_COPROCESSOR5_DISABLED 37
+#define EXCCAUSE_COPROCESSOR6_DISABLED 38
+#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
+
+/* PS register fields. */
+
+#define PS_WOE_BIT 18
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_OWB_SHIFT 8
+#define PS_OWB_WIDTH 4
+#define PS_OWB_MASK 0x00000F00
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_UM_BIT 5
+#define PS_EXCM_BIT 4
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
+#define PS_INTLEVEL_MASK 0x0000000F
+
+/* DBREAKCn register fields. */
+
+#define DBREAKC_MASK_BIT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOAD_BIT 30
+#define DBREAKC_LOAD_MASK 0x40000000
+#define DBREAKC_STOR_BIT 31
+#define DBREAKC_STOR_MASK 0x80000000
+
+/* DEBUGCAUSE register fields. */
+
+#define DEBUGCAUSE_DBNUM_MASK 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8 /* First bit of DBNUM field */
+#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
+#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
+
+#endif /* _XTENSA_REGS_H */
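
A short sketch of decoding individual fields from a saved PS image with the masks above (ps is an illustrative unsigned long value):

    unsigned int ring = (ps & PS_RING_MASK) >> PS_RING_SHIFT;
    unsigned int intlevel = (ps & PS_INTLEVEL_MASK) >> PS_INTLEVEL_SHIFT;
    int in_user_mode = (ps & (1 << PS_UM_BIT)) != 0;
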
diff --git a/arch/xtensa/include/asm/segment.h b/arch/xtensa/include/asm/segment.h
new file mode 100644
index 000000000..98964ad15
--- /dev/null
+++ b/arch/xtensa/include/asm/segment.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/segment.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SEGMENT_H
+#define _XTENSA_SEGMENT_H
+
+#include <linux/uaccess.h>
+
+#endif /* _XTENSA_SEGMENT_H */
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
new file mode 100644
index 000000000..a8a249326
--- /dev/null
+++ b/arch/xtensa/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <platform/serial.h>
+
+#endif /* _XTENSA_SERIAL_H */
diff --git a/arch/xtensa/include/asm/shmparam.h b/arch/xtensa/include/asm/shmparam.h
new file mode 100644
index 000000000..c8cc16c3d
--- /dev/null
+++ b/arch/xtensa/include/asm/shmparam.h
@@ -0,0 +1,21 @@
+/*
+ * include/asm-xtensa/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_SHMPARAM_H
+#define _XTENSA_SHMPARAM_H
+
+/*
+ * Xtensa can have variable size caches, and if
+ * the size of a single way is larger than the page size,
+ * then we have to start worrying about cache aliasing
+ * problems.
+ */
+
+#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
+
+#endif /* _XTENSA_SHMPARAM_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
new file mode 100644
index 000000000..de169b4ea
--- /dev/null
+++ b/arch/xtensa/include/asm/signal.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_SIGNAL_H
+#define _XTENSA_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#ifndef __ASSEMBLY__
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
new file mode 100644
index 000000000..4e43f5643
--- /dev/null
+++ b/arch/xtensa/include/asm/smp.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SMP_H
+#define _XTENSA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define cpu_logical_map(cpu) (cpu)
+
+struct start_info {
+ unsigned long stack;
+};
+extern struct start_info start_info;
+
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
+
+#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
new file mode 100644
index 000000000..c6e1290dc
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -0,0 +1,199 @@
+/*
+ * include/asm-xtensa/spinlock.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SPINLOCK_H
+#define _XTENSA_SPINLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * spinlock
+ *
+ * There is at most one owner of a spinlock. Unlike rwlocks (see below),
+ * there are no different types of spinlock owners.
+ *
+ * When trying to obtain a spinlock, the function "spins" forever, or busy-
+ * waits, until the lock is obtained. When spinning, presumably some other
+ * owner will soon give up the spinlock making it available to others. Use
+ * the trylock functions to avoid spinning forever.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the spinlock
+ * 1 somebody owns the spinlock
+ */
+
+#define arch_spin_is_locked(x) ((x)->slock != 0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/*
+ * rwlock
+ *
+ * Read-write locks are really a more flexible spinlock. They allow
+ * multiple readers but only one writer. Write ownership is exclusive
+ * (i.e., all other readers and writers are blocked from ownership while
+ * there is a write owner). These rwlocks are unfair to writers. Writers
+ * can be starved for an indefinite time by readers.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the rwlock
+ * >0 one or more readers own the rwlock
+ * (the positive value is the actual number of readers)
+ * 0x80000000 one writer owns the rwlock, no other writers, no readers
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " bltz %1, 1b\n"
+ " wsr %1, scompare1\n"
+ " addi %0, %1, 1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long result;
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " l32i %1, %2, 0\n"
+ " addi %0, %1, 1\n"
+ " bltz %0, 1f\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ "1:\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return result == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " addi %0, %1, -1\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp1), "=&a" (tmp2)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+#endif /* _XTENSA_SPINLOCK_H */
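
As the comment above notes, the trylock variants are the way to avoid spinning forever; a minimal sketch of that pattern (the function name is hypothetical):

    static void example_lock_with_fallback(arch_spinlock_t *lock)
    {
            if (!arch_spin_trylock(lock))   /* returns 1 on success, 0 otherwise */
                    arch_spin_lock(lock);   /* fall back to busy-waiting */
            /* ... critical section ... */
            arch_spin_unlock(lock);
    }
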
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
new file mode 100644
index 000000000..bb1fe6c18
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000..e368f94fd
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be held in a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 000000000..fe06e8ed1
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,44 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task || task == current)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
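
A hedged sketch of the callback-driven walker declared above (the callback and depth counter are illustrative; kernel context assumed):

    static int example_print_frame(struct stackframe *frame, void *data)
    {
            int *depth = data;

            pr_info(" #%d pc=%08lx sp=%08lx\n", (*depth)++, frame->pc, frame->sp);
            return 0;       /* returning non-zero stops the walk */
    }

    static void example_dump_current_stack(void)
    {
            int depth = 0;

            walk_stackframe(stack_pointer(NULL), example_print_frame, &depth);
    }
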
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
new file mode 100644
index 000000000..89b51a0c7
--- /dev/null
+++ b/arch/xtensa/include/asm/string.h
@@ -0,0 +1,140 @@
+/*
+ * include/asm-xtensa/string.h
+ *
+ * These trivial string functions are considered part of the public domain.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
+
+#ifndef _XTENSA_STRING_H
+#define _XTENSA_STRING_H
+
+#define __HAVE_ARCH_STRCPY
+static inline char *strcpy(char *__dest, const char *__src)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ __asm__ __volatile__("1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "bnez %2, 1b\n\t"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ if (__n == 0)
+ return __xdest;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "bne %1, %5, 1b\n"
+ "2:"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char *__cs, const char *__ct)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "mov %2, %3\n"
+ "1:\n\t"
+ "beq %0, %6, 2f\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beqz %3, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+/* Don't build bcopy at all ... */
+#define __HAVE_ARCH_BCOPY
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
+#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 000000000..6b73bf0eb
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last) \
+do { \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
new file mode 100644
index 000000000..3673ff1f1
--- /dev/null
+++ b/arch/xtensa/include/asm/syscall.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-xtensa/syscall.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+struct pt_regs;
+asmlinkage long xtensa_ptrace(long, long, long, long);
+asmlinkage long xtensa_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_shmat(int, char __user *, int);
+asmlinkage long xtensa_fadvise64_64(int, int,
+ unsigned long long, unsigned long long);
+
+/* Should probably move to linux/syscalls.h */
+struct pollfd;
+asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
+ fd_set __user *exp, struct timespec __user *tsp,
+ void __user *sig);
+asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
+ struct timespec __user *tsp,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644
index 000000000..552cdfd85
--- /dev/null
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -0,0 +1,19 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#include <linux/memblock.h>
+
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
new file mode 100644
index 000000000..2bd19ae61
--- /dev/null
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -0,0 +1,135 @@
+/*
+ * include/asm-xtensa/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_THREAD_INFO_H
+#define _XTENSA_THREAD_INFO_H
+
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+#ifndef __ASSEMBLY__
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct xtregs_coprocessor {
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+} xtregs_coprocessor_t;
+
+#endif
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
+
+ mm_segment_t addr_limit; /* thread address space */
+
+ unsigned long cpenable;
+
+ /* Allocate storage for extra user states and coprocessor states. */
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t xtregs_cp;
+#endif
+ xtregs_user_t xtregs_user;
+};
+
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
+ "xor %0, a1, %0" : "=&r" (ti) : );
+ return ti;
+}
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg,sp) \
+ extui reg, sp, 0, CURRENT_SHIFT; \
+ xor reg, sp, reg
+#endif
+
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
+#define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+
+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
+
+#endif /* _XTENSA_THREAD_INFO_H */
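
current_thread_info() above relies on the kernel stack being KERNEL_STACK_SIZE aligned: clearing the low CURRENT_SHIFT bits of the stack pointer lands on the stack base, where struct thread_info lives. A worked example assuming an 8 KiB stack (CURRENT_SHIFT == 13) and an illustrative stack pointer:

    /* sp  = 0xd0004a30                                         */
    /* extui low, sp, 0, 13   ->  low = 0x00000a30              */
    /* xor   ti,  sp, low     ->  ti  = 0xd0004000 (stack base) */
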
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
new file mode 100644
index 000000000..d866bc847
--- /dev/null
+++ b/arch/xtensa/include/asm/timex.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TIMEX_H
+#define _XTENSA_TIMEX_H
+
+#include <asm/processor.h>
+#include <linux/stringify.h>
+
+#if XCHAL_NUM_TIMERS > 0 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 0
+# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 1 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 1
+# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 2 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 2
+# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
+#else
+# error "Bad timer number for Linux configurations!"
+#endif
+
+extern unsigned long ccount_freq;
+
+void local_timer_setup(unsigned cpu);
+
+/*
+ * Register access.
+ */
+
+#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
+#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
+
+static inline unsigned long get_ccount (void)
+{
+ unsigned long ccount;
+ RSR_CCOUNT(ccount);
+ return ccount;
+}
+
+static inline void set_ccount (unsigned long ccount)
+{
+ WSR_CCOUNT(ccount);
+}
+
+static inline unsigned long get_linux_timer (void)
+{
+ unsigned ccompare;
+ RSR_CCOMPARE(LINUX_TIMER, ccompare);
+ return ccompare;
+}
+
+static inline void set_linux_timer (unsigned long ccompare)
+{
+ WSR_CCOMPARE(LINUX_TIMER, ccompare);
+}
+
+#include <asm-generic/timex.h>
+
+#endif /* _XTENSA_TIMEX_H */
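The CCOUNT/CCOMPARE helpers above are the building blocks of the clock-event code: a one-shot timer event is armed by writing "current cycle count plus delta" into the Linux timer's CCOMPARE register. The sketch below shows that pattern; it is modelled on, not copied from, the xtensa clockevent driver, and the function name is illustrative.

static int ccount_timer_set_next_event(unsigned long delta)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;	/* deadline, in CCOUNT cycles */
	set_linux_timer(next);		/* program CCOMPARE for LINUX_TIMER */

	/* If the deadline already slipped past, ask the core to retry. */
	if ((long)(next - get_ccount()) <= 0)
		ret = -ETIME;

	local_irq_restore(flags);
	return ret;
}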
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
new file mode 100644
index 000000000..0d766f9c1
--- /dev/null
+++ b/arch/xtensa/include/asm/tlb.h
@@ -0,0 +1,47 @@
+/*
+ * include/asm-xtensa/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLB_H
+#define _XTENSA_TLB_H
+
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
+
+/* Note, read http://lkml.org/lkml/2004/1/15/6 */
+
+# define tlb_start_vma(tlb,vma) do { } while (0)
+# define tlb_end_vma(tlb,vma) do { } while (0)
+
+#else
+
+# define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while(0)
+
+# define tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+ } while(0)
+
+#endif
+
+#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+
+#endif /* _XTENSA_TLB_H */
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
new file mode 100644
index 000000000..06875feb2
--- /dev/null
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLBFLUSH_H
+#define _XTENSA_TLBFLUSH_H
+
+#include <linux/stringify.h>
+#include <asm/processor.h>
+
+#define DTLB_WAY_PGD 7
+
+#define ITLB_ARF_WAYS 4
+#define DTLB_ARF_WAYS 4
+
+#define ITLB_HIT_BIT 3
+#define DTLB_HIT_BIT 4
+
+#ifndef __ASSEMBLY__
+
+/* TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(mm, vmaddr) flushes a single page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
+ end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
+
+#endif /* CONFIG_SMP */
+
+/* TLB operations. */
+
+static inline unsigned long itlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline unsigned long dtlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline void invalidate_itlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
+}
+
+static inline void invalidate_dtlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
+}
+
+/* Use the .._no_isync functions with caution. Generally, these are
+ * handy for bulk invalidates followed by a single 'isync'. The
+ * caller must follow up with an 'isync', which can be relatively
+ * expensive on some Xtensa implementations.
+ */
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
+}
+
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
+}
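As the comment above says, the _no_isync variants are meant for bulk invalidation with a single isync at the end. A rough sketch of that pattern, loosely following what local_flush_tlb_all() does; the entries-per-way count and the way/index entry encoding are assumptions here:

static inline void invalidate_all_itlb_arf_entries(void)
{
	unsigned int way, index;

	for (way = 0; way < ITLB_ARF_WAYS; way++)
		for (index = 0; index < 4; index++)	/* entries per way: assumed */
			invalidate_itlb_entry_no_isync(way + (index << PAGE_SHIFT));

	__asm__ __volatile__("isync");	/* one isync for the whole batch */
}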
+
+static inline void set_itlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_dtlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_ptevaddr_register (unsigned long val)
+{
+ __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
+ : : "a" (val));
+}
+
+static inline unsigned long read_ptevaddr_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void write_dtlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void write_itlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("witlb %1, %0; isync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void invalidate_page_directory (void)
+{
+ invalidate_dtlb_entry (DTLB_WAY_PGD);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+1);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+2);
+}
+
+static inline void invalidate_itlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
+ invalidate_itlb_entry(tlb_entry);
+}
+
+static inline void invalidate_dtlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
+ invalidate_dtlb_entry(tlb_entry);
+}
+
+#define check_pgt_cache() do { } while (0)
+
+
+/*
+ * DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
+ * ISA and exist only for test purposes.
+ * You may find them helpful for MMU debugging, however.
+ *
+ * 'at' is the unmodified input register
+ * 'as' is the output register, as follows (specific to the Linux config):
+ *
+ * as[31..12] contain the virtual address
+ * as[11..08] are meaningless
+ * as[07..00] contain the asid
+ */
+
+static inline unsigned long read_dtlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_dtlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_TLBFLUSH_H */
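The debug-only read_*_virtual() helpers return a value laid out as described above (virtual page address in bits 31..12, ASID in bits 7..0 for this Linux configuration). A small sketch of how such a value could be decoded while poking at the MMU; the helper name is made up:

static inline void decode_dtlb_virtual(unsigned long as,
				       unsigned long *vaddr, unsigned int *asid)
{
	*vaddr = as & 0xfffff000;	/* as[31..12]: virtual page address */
	*asid  = as & 0xff;		/* as[07..00]: address space id */
}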
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000..f5cd7a7e6
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,120 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* For fast syscall handler */
+ unsigned long syscall_save;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
+ * handler must be either of the following:
+ * void (*)(struct pt_regs *regs);
+ * void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
+void secondary_trap_init(void);
+
+static inline void spill_registers(void)
+{
+#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+ " call8 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
+ " _entry a1, 48\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+ " _entry a1, 16\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
+#endif
+ " retw\n"
+#endif
+ "2:\n"
+ : : : "a8", "a9", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
+}
+
+struct debug_table {
+ /* Pointer to debug exception handler */
+ void (*debug_exception)(void);
+ /* Temporary register save area */
+ unsigned long debug_save[1];
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Save area for DBREAKC registers */
+ unsigned long dbreakc_save[XCHAL_NUM_DBREAK];
+ /* Saved ICOUNT register */
+ unsigned long icount_save;
+ /* Saved ICOUNTLEVEL register */
+ unsigned long icount_level_save;
+#endif
+};
+
+void debug_exception(void);
+
+#endif /* _XTENSA_TRAPS_H */
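trap_set_handler() accepts either of the handler signatures listed in the comment above and returns the previously installed handler. A hypothetical registration during early init might look like the following; the chosen cause, the handler body and the initcall level are illustrative, not part of this header:

static void my_cause_handler(struct pt_regs *regs, unsigned long exccause)
{
	pr_err("unexpected exception %lu at pc %#lx\n", exccause, regs->pc);
	do_unhandled(regs, exccause);
}

static int __init my_trap_setup(void)
{
	/* Illustrative cause; EXCCAUSE_* values come from asm/regs.h. */
	trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, (void *)my_cause_handler);
	return 0;
}
early_initcall(my_trap_setup);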
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
new file mode 100644
index 000000000..2b410b8c7
--- /dev/null
+++ b/arch/xtensa/include/asm/types.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_TYPES_H
+#define _XTENSA_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+
+#define BITS_PER_LONG 32
+
+#endif
+#endif /* _XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
new file mode 100644
index 000000000..da4effe27
--- /dev/null
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -0,0 +1,311 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access functions to user memory space for
+ * the kernel: single-value get_user()/put_user() transfers, the bulk copy
+ * and clear helpers, and the user-string helpers declared below.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UACCESS_H
+#define _XTENSA_UACCESS_H
+
+#include <linux/prefetch.h>
+#include <asm/types.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should
+ * be performed or not. If get_fs() == USER_DS, checking is
+ * performed; with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are
+ * grossly misnamed.
+ */
+
+#define KERNEL_DS ((mm_segment_t) { 0 })
+#define USER_DS ((mm_segment_t) { 1 })
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.current_ds)
+#define set_fs(val) (current->thread.current_ds = (val))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __kernel_ok (uaccess_kernel())
+#define __user_ok(addr, size) \
+ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
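The KERNEL_DS/USER_DS machinery above exists so kernel code can temporarily widen the address-limit check and reuse the user-access helpers on kernel buffers. The classic save/override/restore idiom is sketched below; the wrapper function is hypothetical, and vfs_read() is only one example of a callee that expects a __user pointer.

static ssize_t read_into_kernel_buffer(struct file *filp, void *kbuf,
				       size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);		/* access_ok() now accepts kernel pointers */
	ret = vfs_read(filp, (char __user *)kbuf, len, pos);
	set_fs(old_fs);			/* always restore the previous limit */

	return ret;
}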
+
+/*
+ * These are the main single-value transfer routines. They
+ * automatically use the right size if we just have the right pointer
+ * type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in
+ * "get_user()" and yet we don't want to do any pointers, because that
+ * is too much of a performance impact. Thus we have a few rather ugly
+ * macros here, and hide all the ugliness from the user.
+ *
+ * Be careful not to:
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
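Tying the two comments above together: get_user()/put_user() perform their own access_ok() check, while the __-prefixed variants rely on one explicit check covering the whole region. A sketch of the second pattern; the struct and function names are hypothetical:

struct demo_args {
	int fd;
	unsigned int flags;
};

static long fetch_demo_args(struct demo_args *dst,
			    const struct demo_args __user *src)
{
	if (!access_ok(VERIFY_READ, src, sizeof(*src)))
		return -EFAULT;

	if (__get_user(dst->fd, &src->fd) ||
	    __get_user(dst->flags, &src->flags))
		return -EFAULT;

	return 0;
}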
+
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
+ case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
+ case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
+ case 8: { \
+ __typeof__(*ptr) __v64 = x; \
+ retval = __copy_to_user(ptr, &__v64, 8); \
+ break; \
+ } \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * Consider a case of a user single load/store would cause both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the
+ * user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
+ * sync.
+ */
+
+#define __check_align_1 ""
+
+#define __check_align_2 \
+ " _bbci.l %3, 0, 1f \n" \
+ " movi %0, %4 \n" \
+ " _j 2f \n"
+
+#define __check_align_4 \
+ " _bbsi.l %3, 0, 0f \n" \
+ " _bbci.l %3, 1, 1f \n" \
+ "0: movi %0, %4 \n" \
+ " _j 2f \n"
+
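In C terms, the __check_align_* fragments reject a user address whose low bits do not match the access size, so the kernel never takes an unaligned exception on a user pointer. A rough equivalent of that test (sketch only, not how the macros are actually consumed):

static inline int user_addr_is_aligned(unsigned long addr, unsigned int size)
{
	return (addr & (size - 1)) == 0;	/* otherwise the -EFAULT path is taken */
}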
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __put_user_asm(x, addr, err, align, insn, cb) \
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %2, %3, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %1, 4b \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err), "=r" (cb) \
+ :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
+ case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
+ case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
+ case 8: retval = __copy_from_user(&x, ptr, 8); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __get_user_asm(x, addr, err, align, insn, cb) \
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %2, %3, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %1, 4b \n" \
+ " movi %2, 0 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err), "=r" (cb), "=r" (x) \
+ :"r" (addr), "i" (-EFAULT), "0" (err))
+
+
+/*
+ * Copy to/from user space
+ */
+
+extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ return __xtensa_copy_user((__force void *)to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+/*
+ * We need to return the number of bytes not cleared. Our memset()
+ * returns zero if a problem occurs while accessing user-space memory.
+ * In that event, report that the full size was left uncleared; otherwise
+ * return zero for success.
+ */
+
+static inline unsigned long
+__xtensa_clear_user(void *addr, unsigned long size)
+{
+ if (!__memset(addr, 0, size))
+ return size;
+ return 0;
+}
+
+static inline unsigned long
+clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __xtensa_clear_user(addr, size);
+ return size ? -EFAULT : 0;
+}
+
+#define __clear_user __xtensa_clear_user
+
+
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
+extern long __strncpy_user(char *, const char *, long);
+
+static inline long
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_user(dst, src, count);
+ return -EFAULT;
+}
+#else
+long strncpy_from_user(char *dst, const char __user *src, long count);
+#endif
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char *, long);
+
+static inline long strnlen_user(const char *str, long len)
+{
+ unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len);
+}
+
+#endif /* _XTENSA_UACCESS_H */
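Following the clear_user() semantics described above, callers treat any non-zero return as bytes left uncleared and usually surface it as -EFAULT. A short, hypothetical usage sketch:

static long zero_user_buffer(void *ubuf, unsigned long len)
{
	/* Any non-zero return means that many bytes were left uncleared;
	 * callers normally report that as -EFAULT. */
	if (clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}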
diff --git a/arch/xtensa/include/asm/ucontext.h b/arch/xtensa/include/asm/ucontext.h
new file mode 100644
index 000000000..94c94ed3e
--- /dev/null
+++ b/arch/xtensa/include/asm/ucontext.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/ucontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UCONTEXT_H
+#define _XTENSA_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _XTENSA_UCONTEXT_H */
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
new file mode 100644
index 000000000..8e7ed046b
--- /dev/null
+++ b/arch/xtensa/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Xtensa doesn't handle unaligned accesses efficiently.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
+
+#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
new file mode 100644
index 000000000..ed66db3bc
--- /dev/null
+++ b/arch/xtensa/include/asm/unistd.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+
+#define __ARCH_WANT_SYS_CLONE
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_GETPGRP
+
+/*
+ * Ignore legacy system calls in the checksyscalls.sh script
+ */
+
+#define __IGNORE_fork /* use clone */
+#define __IGNORE_time
+#define __IGNORE_alarm /* use setitimer */
+#define __IGNORE_pause
+#define __IGNORE_mmap /* use mmap2 */
+#define __IGNORE_vfork /* use clone */
+#define __IGNORE_fadvise64 /* use fadvise64_64 */
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
new file mode 100644
index 000000000..2c3ed2335
--- /dev/null
+++ b/arch/xtensa/include/asm/user.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/user.h
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_USER_H
+#define _XTENSA_USER_H
+
+/* This file usually defines a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on Xtensa.
+ */
+
+#endif /* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000..2d3e0cca9
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 support. Deals with re-mapping the virtual
+ * memory addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+#include <asm/kmem_layout.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+#if defined(CONFIG_MMU)
+
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/* Image Virtual Start Address */
+#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
+ CONFIG_KERNEL_LOAD_ADDRESS - \
+ XCHAL_KSEG_PADDR)
+#else
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+ /* MMU Not being used - Virtual == Physical */
+
+/* Location of the start of the kernel text, _start */
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+
+
+#endif /* CONFIG_MMU */
+
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#ifdef CONFIG_VECTORS_OFFSET
+#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
+#else
+#define VECBASE_VADDR _vecbase
+#endif
+
+#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
+
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
+
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL7_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/include/asm/vga.h b/arch/xtensa/include/asm/vga.h
new file mode 100644
index 000000000..1fd8cab3a
--- /dev/null
+++ b/arch/xtensa/include/asm/vga.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-xtensa/vga.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_VGA_H
+#define _XTENSA_VGA_H
+
+#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif