path: root/arch/xtensa/kernel
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
commit2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree848558de17fb3008cdf4d861b01ac7781903ce39 /arch/xtensa/kernel
parentInitial commit. (diff)
downloadlinux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--  arch/xtensa/kernel/.gitignore  2
-rw-r--r--  arch/xtensa/kernel/Makefile  46
-rw-r--r--  arch/xtensa/kernel/align.S  487
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c  164
-rw-r--r--  arch/xtensa/kernel/coprocessor.S  302
-rw-r--r--  arch/xtensa/kernel/entry.S  2264
-rw-r--r--  arch/xtensa/kernel/head.S  386
-rw-r--r--  arch/xtensa/kernel/hibernate.c  25
-rw-r--r--  arch/xtensa/kernel/hw_breakpoint.c  308
-rw-r--r--  arch/xtensa/kernel/irq.c  194
-rw-r--r--  arch/xtensa/kernel/jump_label.c  95
-rw-r--r--  arch/xtensa/kernel/mcount.S  85
-rw-r--r--  arch/xtensa/kernel/module.c  189
-rw-r--r--  arch/xtensa/kernel/mxhead.S  64
-rw-r--r--  arch/xtensa/kernel/pci-dma.c  98
-rw-r--r--  arch/xtensa/kernel/pci.c  87
-rw-r--r--  arch/xtensa/kernel/perf_event.c  450
-rw-r--r--  arch/xtensa/kernel/platform.c  43
-rw-r--r--  arch/xtensa/kernel/process.c  398
-rw-r--r--  arch/xtensa/kernel/ptrace.c  585
-rw-r--r--  arch/xtensa/kernel/s32c1i_selftest.c  127
-rw-r--r--  arch/xtensa/kernel/setup.c  704
-rw-r--r--  arch/xtensa/kernel/signal.c  533
-rw-r--r--  arch/xtensa/kernel/smp.c  634
-rw-r--r--  arch/xtensa/kernel/stacktrace.c  275
-rw-r--r--  arch/xtensa/kernel/syscall.c  99
-rw-r--r--  arch/xtensa/kernel/syscalls/Makefile  32
-rw-r--r--  arch/xtensa/kernel/syscalls/syscall.tbl  423
-rw-r--r--  arch/xtensa/kernel/time.c  214
-rw-r--r--  arch/xtensa/kernel/traps.c  595
-rw-r--r--  arch/xtensa/kernel/vectors.S  805
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S  380
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c  123
33 files changed, 11216 insertions, 0 deletions
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/xtensa/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 000000000..f28b8e3d7
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/Xtensa kernel.
+#
+
+extra-y := vmlinux.lds
+
+obj-y := head.o align.o coprocessor.o entry.o irq.o platform.o process.o \
+ ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+ vectors.o
+
+obj-$(CONFIG_MMU) += pci-dma.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SECONDARY_RESET_VECTOR) += mxhead.o
+obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o
+
+# In the Xtensa architecture, assembly generates literals which must always
+# precede the L32R instruction with a relative offset less than 256 kB.
+# Therefore, the .text and .literal sections must be combined in parentheses
+# in the linker script, such as: *(.literal .text).
+#
+# We need to post-process the generated vmlinux.lds scripts to convert
+# *(xxx.text) to *(xxx.literal xxx.text) for the following text sections:
+# .text .ref.text .*init.text .*exit.text .text.*
+#
+# Replicate rules in scripts/Makefile.build
+
+sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \
+ -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \
+ -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
+ -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \
+ -e 's/\.{text}/.text/g'
+
+quiet_cmd__cpp_lds_S = LDS $@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ \
+ -DLINKER_SCRIPT $< | sed $(sed-y) >$@
+
+$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
+ $(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 000000000..d062c732e
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,487 @@
+/*
+ * arch/xtensa/kernel/align.S
+ *
+ * Handle unalignment exceptions in kernel space.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica, Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ *
+ * Rewritten by Chris Zankel <chris@zankel.net>
+ *
+ * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
+ */
+
+#include <linux/linkage.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
+/* First-level exception handler for unaligned exceptions.
+ *
+ * Note: This handler works only for kernel exceptions. Unaligned user
+ * access should get a seg fault.
+ */
+
+/* Big and little endian 16-bit values are located in
+ * different halves of a register. HWORD_START helps to
+ * abstract the notion of extracting a 16-bit value from a
+ * register.
+ * We also have to define new shifting instructions because
+ * lsb and msb are on 'opposite' ends in a register for
+ * different endian machines.
+ *
+ * Assume a memory region in ascending address:
+ * 0 1 2 3|4 5 6 7
+ *
+ * When loading one word into a register, the content of that register is:
+ * LE 3 2 1 0, 7 6 5 4
+ * BE 0 1 2 3, 4 5 6 7
+ *
+ * Masking the bits of the higher/lower address means:
+ * LE X X 0 0, 0 0 X X
+ * BE 0 0 X X, X X 0 0
+ *
+ * Shifting to higher/lower addresses, means:
+ * LE shift left / shift right
+ * BE shift right / shift left
+ *
+ * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
+ * LE mask 0 0 X X / shift left
+ * BE shift left / mask 0 0 X X
+ */
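
For illustration only (not part of the kernel sources), a hedged C sketch of the shift/mask scheme the comment above describes for a little-endian machine: read the two aligned words that straddle the unaligned address and funnel-shift the pair so the wanted bytes land in the low half, roughly the job done by the SSA8/SRC sequences further down. The function name and helper types here are invented for the example.

#include <stdint.h>

/* Illustration only: emulate an unaligned 16-bit load on a little-endian machine. */
static uint16_t emulate_l16ui_le(const uint8_t *addr)
{
	uintptr_t base = (uintptr_t)addr & ~(uintptr_t)3;  /* align, like "movi ~3; and" */
	unsigned shift = ((uintptr_t)addr & 3) * 8;        /* byte offset -> bit shift */
	uint32_t lo, hi;
	uint64_t pair;

	/* the two aligned words surrounding the unaligned address */
	lo = *(const uint32_t *)base;
	hi = *(const uint32_t *)(base + 4);

	/* funnel-shift the 64-bit pair so the requested bytes end up at bit 0 */
	pair = ((uint64_t)hi << 32) | lo;
	return (uint16_t)(pair >> shift);
}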
+
+#if XCHAL_HAVE_WINDOWED
+#define UNALIGNED_USER_EXCEPTION
+#endif
+
+#if XCHAL_HAVE_BE
+
+#define HWORD_START 16
+#define INSN_OP0 28
+#define INSN_T 24
+#define INSN_OP1 16
+
+.macro __ssa8r r; ssa8l \r; .endm
+.macro __sh r, s; srl \r, \s; .endm
+.macro __sl r, s; sll \r, \s; .endm
+.macro __exth r, s; extui \r, \s, 0, 16; .endm
+.macro __extl r, s; slli \r, \s, 16; .endm
+
+#else
+
+#define HWORD_START 0
+#define INSN_OP0 0
+#define INSN_T 4
+#define INSN_OP1 12
+
+.macro __ssa8r r; ssa8b \r; .endm
+.macro __sh r, s; sll \r, \s; .endm
+.macro __sl r, s; srl \r, \s; .endm
+.macro __exth r, s; slli \r, \s, 16; .endm
+.macro __extl r, s; extui \r, \s, 0, 16; .endm
+
+#endif
+
+/*
+ * xxxx xxxx = imm8 field
+ * yyyy = imm4 field
+ * ssss = s field
+ * tttt = t field
+ *
+ * 16 0
+ * -------------------
+ * L32I.N yyyy ssss tttt 1000
+ * S32I.N yyyy ssss tttt 1001
+ *
+ * 23 0
+ * -----------------------------
+ * res 0000 0010
+ * L16UI xxxx xxxx 0001 ssss tttt 0010
+ * L32I xxxx xxxx 0010 ssss tttt 0010
+ * XXX 0011 ssss tttt 0010
+ * XXX 0100 ssss tttt 0010
+ * S16I xxxx xxxx 0101 ssss tttt 0010
+ * S32I xxxx xxxx 0110 ssss tttt 0010
+ * XXX 0111 ssss tttt 0010
+ * XXX 1000 ssss tttt 0010
+ * L16SI xxxx xxxx 1001 ssss tttt 0010
+ * XXX 1010 0010
+ * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
+ * XXX 1100 0010
+ * XXX 1101 0010
+ * XXX 1110 0010
+ * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
+ * -----------------------------
+ * ^ ^ ^
+ * sub-opcode (NIBBLE_R) -+ | |
+ * t field (NIBBLE_T) -----------+ |
+ * major opcode (NIBBLE_OP0) --------------+
+ */
+
+#define OP0_L32I_N 0x8 /* load immediate narrow */
+#define OP0_S32I_N 0x9 /* store immediate narrow */
+#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
+#define OP1_SI_BIT 2 /* OP1 bit number for stores */
+
+#define OP1_L32I 0x2
+#define OP1_L16UI 0x1
+#define OP1_L16SI 0x9
+#define OP1_L32AI 0xb
+
+#define OP1_S32I 0x6
+#define OP1_S16I 0x5
+#define OP1_S32RI 0xf
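
For reference, a small hedged C sketch (illustration only, not kernel code) of the field extraction the handler performs below with extui, using the little-endian INSN_* bit positions defined above; on big-endian cores the positions differ as per the #defines.

#include <stdint.h>

/* Illustration only: extract a 4-bit instruction field, as extui does. */
static inline unsigned insn_field(uint32_t insn, unsigned pos)
{
	return (insn >> pos) & 0xf;
}

/*
 * Example (little-endian positions): op0 = insn_field(insn, 0),
 * t = insn_field(insn, 4), op1 = insn_field(insn, 12); op0 of 0x8/0x9
 * identifies L32I.N/S32I.N, op1 selects among L16UI/L32I/S16I/S32I/...
 */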
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ .literal_position
+ENTRY(fast_unaligned)
+
+ /* Note: We don't expect the address to be aligned on a word
+ * boundary; after all, the processor generated the unaligned
+ * exception precisely because it isn't. If it were aligned,
+ * that would indicate a hardware fault.
+ */
+
+ /* Save some working register */
+
+ s32i a4, a2, PT_AREG4
+ s32i a5, a2, PT_AREG5
+ s32i a6, a2, PT_AREG6
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+
+ rsr a0, depc
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+
+ rsr a3, excsave1
+ movi a4, fast_unaligned_fixup
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Keep value of SAR in a0 */
+
+ rsr a0, sar
+ rsr a8, excvaddr # load unaligned memory address
+
+ /* Now, identify one of the following load/store instructions.
+ *
+ * The only possible danger of a double exception on the
+ * following l32i instructions is kernel code in vmalloc
+ * memory. The processor was just executing at the EPC_1
+ * address, and indeed, already fetched the instruction. That
+ * guarantees a TLB mapping, which hasn't been replaced by
+ * this unaligned exception handler that uses only static TLB
+ * mappings. However, high-level interrupt handlers might
+ * modify TLB entries, so for the generic case, we register a
+ * TABLE_FIXUP handler here, too.
+ */
+
+ /* a3...a6 saved on stack, a2 = SP */
+
+ /* Extract the instruction that caused the unaligned access. */
+
+ rsr a7, epc1 # load exception address
+ movi a3, ~3
+ and a3, a3, a7 # mask lower bits
+
+ l32i a4, a3, 0 # load 2 words
+ l32i a5, a3, 4
+
+ __ssa8 a7
+ __src_b a4, a4, a5 # a4 has the instruction
+
+ /* Analyze the instruction (load or store?). */
+
+ extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
+
+#if XCHAL_HAVE_DENSITY
+ _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
+ addi a6, a5, -OP0_S32I_N
+ _beqz a6, .Lstore # S32I.N, do a store
+#endif
+ /* 'store indicator bit' not set, jump */
+ _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
+
+ /* Store: Jump to table entry to get the value in the source register.*/
+
+.Lstore:movi a5, .Lstore_table # table
+ extui a6, a4, INSN_T, 4 # get source register
+ addx8 a5, a6, a5
+ jx a5 # jump into table
+
+ /* Load: Load memory address. */
+
+.Lload: movi a3, ~3
+ and a3, a3, a8 # align memory address
+
+ __ssa8 a8
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a3, a3, 8
+ l32e a5, a3, -8
+ l32e a6, a3, -4
+#else
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+#endif
+ __src_b a3, a5, a6 # a3 has the data word
+
+#if XCHAL_HAVE_DENSITY
+ addi a7, a7, 2 # increment PC (assume 16-bit insn)
+
+ extui a5, a4, INSN_OP0, 4
+ _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
+
+ addi a7, a7, 1
+#else
+ addi a7, a7, 3
+#endif
+
+ extui a5, a4, INSN_OP1, 4
+ _beqi a5, OP1_L32I, 1f # l32i: jump
+
+ extui a3, a3, 0, 16 # extract lower 16 bits
+ _beqi a5, OP1_L16UI, 1f
+ addi a5, a5, -OP1_L16SI
+ _bnez a5, .Linvalid_instruction_load
+
+ /* sign extend value */
+
+ slli a3, a3, 16
+ srai a3, a3, 16
+
+ /* Set target register. */
+
+1:
+ extui a4, a4, INSN_T, 4 # extract target register
+ movi a5, .Lload_table
+ addx8 a4, a4, a5
+ jx a4 # jump to entry for target register
+
+ .align 8
+.Lload_table:
+ s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
+ mov a1, a3; _j .Lexit; .align 8 # fishy??
+ s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
+ mov a9, a3 ; _j .Lexit; .align 8
+ mov a10, a3 ; _j .Lexit; .align 8
+ mov a11, a3 ; _j .Lexit; .align 8
+ mov a12, a3 ; _j .Lexit; .align 8
+ mov a13, a3 ; _j .Lexit; .align 8
+ mov a14, a3 ; _j .Lexit; .align 8
+ mov a15, a3 ; _j .Lexit; .align 8
+
+.Lstore_table:
+ l32i a3, a2, PT_AREG0; _j 1f; .align 8
+ mov a3, a1; _j 1f; .align 8 # fishy??
+ l32i a3, a2, PT_AREG2; _j 1f; .align 8
+ l32i a3, a2, PT_AREG3; _j 1f; .align 8
+ l32i a3, a2, PT_AREG4; _j 1f; .align 8
+ l32i a3, a2, PT_AREG5; _j 1f; .align 8
+ l32i a3, a2, PT_AREG6; _j 1f; .align 8
+ l32i a3, a2, PT_AREG7; _j 1f; .align 8
+ l32i a3, a2, PT_AREG8; _j 1f; .align 8
+ mov a3, a9 ; _j 1f; .align 8
+ mov a3, a10 ; _j 1f; .align 8
+ mov a3, a11 ; _j 1f; .align 8
+ mov a3, a12 ; _j 1f; .align 8
+ mov a3, a13 ; _j 1f; .align 8
+ mov a3, a14 ; _j 1f; .align 8
+ mov a3, a15 ; _j 1f; .align 8
+
+ /* We cannot handle this exception. */
+
+ .extern _kernel_exception
+.Linvalid_instruction_load:
+.Linvalid_instruction_store:
+
+ movi a4, 0
+ rsr a3, excsave1
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ wsr a0, sar
+ mov a1, a2
+
+ rsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 2f # jump if user mode
+
+ movi a0, _kernel_exception
+ jx a0
+
+2: movi a0, _user_exception
+ jx a0
+
+1: # a7: instruction pointer, a4: instruction, a3: value
+
+ movi a6, 0 # mask: ffffffff:00000000
+
+#if XCHAL_HAVE_DENSITY
+ addi a7, a7, 2 # incr. PC,assume 16-bit instruction
+
+ extui a5, a4, INSN_OP0, 4 # extract OP0
+ addi a5, a5, -OP0_S32I_N
+ _beqz a5, 1f # s32i.n: jump
+
+ addi a7, a7, 1 # increment PC, 32-bit instruction
+#else
+ addi a7, a7, 3 # increment PC, 32-bit instruction
+#endif
+
+ extui a5, a4, INSN_OP1, 4 # extract OP1
+ _beqi a5, OP1_S32I, 1f # jump if 32 bit store
+ _bnei a5, OP1_S16I, .Linvalid_instruction_store
+
+ movi a5, -1
+ __extl a3, a3 # get 16-bit value
+ __exth a6, a5 # get 16-bit mask ffffffff:ffff0000
+
+ /* Get memory address */
+
+1:
+ movi a4, ~3
+ and a4, a4, a8 # align memory address
+
+ /* Insert value into memory */
+
+ movi a5, -1 # mask: ffffffff:XXXX0000
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a4, a4, 8
+#endif
+
+ __ssa8r a8
+ __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
+ __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
+#ifdef UNALIGNED_USER_EXCEPTION
+ l32e a5, a4, -8
+#else
+ l32i a5, a4, 0 # load lower address word
+#endif
+ and a5, a5, a8 # mask
+ __sh a8, a3 # shift value
+ or a5, a5, a8 # or with original value
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a5, a4, -8
+ l32e a8, a4, -4
+#else
+ s32i a5, a4, 0 # store
+ l32i a8, a4, 4 # same for upper address word
+#endif
+ __sl a5, a3
+ and a6, a8, a6
+ or a6, a6, a5
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a6, a4, -4
+#else
+ s32i a6, a4, 4
+#endif
+
+.Lexit:
+#if XCHAL_HAVE_LOOPS
+ rsr a4, lend # check if we reached LEND
+ bne a7, a4, 1f
+ rsr a4, lcount # and LCOUNT != 0
+ beqz a4, 1f
+ addi a4, a4, -1 # decrement LCOUNT and set
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a4, lcount
+#endif
+
+1: wsr a7, epc1 # skip emulated instruction
+
+ /* Update icount if we're single-stepping in userspace. */
+ rsr a4, icountlevel
+ beqz a4, 1f
+ bgeui a4, LOCKLEVEL + 1, 1f
+ rsr a4, icount
+ addi a4, a4, 1
+ wsr a4, icount
+1:
+ movi a4, 0
+ rsr a3, excsave1
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Restore working register */
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
+
+ /* restore SAR and return */
+
+ wsr a0, sar
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_AREG2
+ rfe
+
+ENDPROC(fast_unaligned)
+
+ENTRY(fast_unaligned_fixup)
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ wsr a3, excsave1
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ l32i a0, a2, PT_AREG2
+ xsr a0, depc # restore depc and a0
+ wsr a0, sar
+
+ rsr a0, exccause
+ s32i a0, a2, PT_DEPC # mark as a regular exception
+
+ rsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 1f # jump if user mode
+
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler
+ l32i a3, a2, PT_AREG3
+ jx a0
+1:
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a2, PT_AREG3
+ jx a0
+
+ENDPROC(fast_unaligned_fixup)
+
+#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 000000000..da38de20a
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,164 @@
+/*
+ * arch/xtensa/kernel/asm-offsets.c
+ *
+ * Generates definitions from c-type structures used by assembly sources.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+int main(void)
+{
+ /* struct pt_regs */
+ DEFINE(PT_PC, offsetof (struct pt_regs, pc));
+ DEFINE(PT_PS, offsetof (struct pt_regs, ps));
+ DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
+ DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
+ DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
+ DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
+ DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
+ DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
+ DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
+ DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
+ DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+ DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
+ DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+ DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
+ DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
+ DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
+ DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
+ DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
+ DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
+ DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
+ DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
+ DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
+ DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
+ DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
+ DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
+ DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
+ DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
+ DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
+ DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
+ DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
+ DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
+ DEFINE(PT_KERNEL_SIZE, offsetof(struct pt_regs, areg[16]));
+ DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+ DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
+
+ /* struct task_struct */
+ DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
+ DEFINE(TASK_MM, offsetof (struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
+ DEFINE(TASK_PID, offsetof (struct task_struct, pid));
+ DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+ DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+
+ /* offsets in thread_info struct */
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_STSTUS, thread_info, status);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr);
+#endif
+
+ /* struct thread_info (offset from start_struct) */
+ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+ DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+#if XCHAL_HAVE_EXCLUSIVE
+ DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
+#endif
+ DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable));
+ DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
+#endif
+ DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+ DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
+
+ /* struct mm_struct */
+ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
+ DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
+ DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
+
+ /* struct page */
+ DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
+
+ /* constants */
+ DEFINE(_CLONE_VM, CLONE_VM);
+ DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+ DEFINE(PG_ARCH_1, PG_arch_1);
+
+ /* struct debug_table */
+ DEFINE(DT_DEBUG_EXCEPTION,
+ offsetof(struct debug_table, debug_exception));
+ DEFINE(DT_DEBUG_SAVE, offsetof(struct debug_table, debug_save));
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ DEFINE(DT_DBREAKC_SAVE, offsetof(struct debug_table, dbreakc_save));
+ DEFINE(DT_ICOUNT_SAVE, offsetof(struct debug_table, icount_save));
+ DEFINE(DT_ICOUNT_LEVEL_SAVE,
+ offsetof(struct debug_table, icount_level_save));
+#endif
+
+ /* struct exc_table */
+ DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
+ DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
+ DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
+ DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(EXC_TABLE_COPROCESSOR_OWNER,
+ offsetof(struct exc_table, coprocessor_owner));
+#endif
+ DEFINE(EXC_TABLE_FAST_USER,
+ offsetof(struct exc_table, fast_user_handler));
+ DEFINE(EXC_TABLE_FAST_KERNEL,
+ offsetof(struct exc_table, fast_kernel_handler));
+ DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+
+#ifdef CONFIG_HIBERNATION
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+#endif
+
+ return 0;
+}
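
To show roughly how these DEFINE()/OFFSET() invocations turn structure offsets into constants that the assembly files in this directory can use, here is a minimal, hedged C sketch of the mechanism; the structure and symbol names are made up for the example, and the real macros live in include/linux/kbuild.h.

#include <stddef.h>

/* Simplified stand-in for the kbuild DEFINE() macro: each invocation emits
 * an ".ascii" marker into the compiler's assembly output; the build system
 * later picks up those markers and turns them into "#define SYM value"
 * lines in the generated asm-offsets.h consumed by entry.S and friends.
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

struct example_regs {		/* hypothetical stand-in for struct pt_regs */
	unsigned long pc;
	unsigned long ps;
};

int main(void)
{
	DEFINE(EXAMPLE_PT_PC, offsetof(struct example_regs, pc));
	DEFINE(EXAMPLE_PT_PS, offsetof(struct example_regs, ps));
	return 0;
}

Compiling this with "gcc -S" shows the ".ascii" marker lines carrying the numeric offsets, which is the raw material the build scripts post-process.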
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 000000000..ef33e76e0
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,302 @@
+/*
+ * arch/xtensa/kernel/coprocessor.S
+ *
+ * Xtensa processor configuration-specific table of coprocessor and
+ * other custom register layout information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/coprocessor.h>
+#include <asm/current.h>
+#include <asm/regs.h>
+
+/*
+ * Rules for coprocessor state manipulation on SMP:
+ *
+ * - a task may have live coprocessors only on one CPU.
+ *
+ * - whether coprocessor context of task T is live on some CPU is
+ * denoted by T's thread_info->cpenable.
+ *
+ * - non-zero thread_info->cpenable means that thread_info->cp_owner_cpu
+ * is valid in the T's thread_info. Zero thread_info->cpenable means that
+ * coprocessor context is valid in the T's thread_info.
+ *
+ * - if a coprocessor context of task T is live on CPU X, only CPU X changes
+ * T's thread_info->cpenable, cp_owner_cpu and coprocessor save area.
+ * This is done by making sure that for the task T with live coprocessor
+ * on CPU X cpenable SR is 0 when T runs on any other CPU Y.
+ * When fast_coprocessor exception is taken on CPU Y it goes to the
+ * C-level do_coprocessor that uses IPI to make CPU X flush T's coprocessors.
+ */
+
+#if XTENSA_HAVE_COPROCESSORS
+
+/*
+ * Macros for lazy context switch.
+ */
+
+#define SAVE_CP_REGS(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .align 4; \
+ .Lsave_cp_regs_cp##x: \
+ xchal_cp##x##_store a2 a3 a4 a5 a6; \
+ ret; \
+ .endif
+
+#define LOAD_CP_REGS(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .align 4; \
+ .Lload_cp_regs_cp##x: \
+ xchal_cp##x##_load a2 a3 a4 a5 a6; \
+ ret; \
+ .endif
+
+#define CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x; \
+ .long .Lload_cp_regs_cp##x; \
+ .else; \
+ .long 0, 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
+
+#define CP_REGS_TAB_SAVE 0
+#define CP_REGS_TAB_LOAD 4
+#define CP_REGS_TAB_OFFSET 8
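
Viewed from C, each CP_REGS_TAB() entry built above is a 12-byte record, which is why fast_coprocessor below indexes the table with addx8 plus addx4 (8*n + 4*n = 12*n). A hedged sketch of the layout follows; the struct name is invented for illustration.

/* Illustration only: one .Lcp_regs_jump_table entry per coprocessor. */
struct cp_regs_tab_entry {
	void *save;		/* .Lsave_cp_regs_cpN, or 0 if the CP is absent (CP_REGS_TAB_SAVE, +0) */
	void *load;		/* .Lload_cp_regs_cpN, or 0 if the CP is absent (CP_REGS_TAB_LOAD, +4) */
	unsigned long offset;	/* THREAD_XTREGS_CPN save-area offset      (CP_REGS_TAB_OFFSET, +8) */
};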
+
+ __XTENSA_HANDLER
+
+ SAVE_CP_REGS(0)
+ SAVE_CP_REGS(1)
+ SAVE_CP_REGS(2)
+ SAVE_CP_REGS(3)
+ SAVE_CP_REGS(4)
+ SAVE_CP_REGS(5)
+ SAVE_CP_REGS(6)
+ SAVE_CP_REGS(7)
+
+ LOAD_CP_REGS(0)
+ LOAD_CP_REGS(1)
+ LOAD_CP_REGS(2)
+ LOAD_CP_REGS(3)
+ LOAD_CP_REGS(4)
+ LOAD_CP_REGS(5)
+ LOAD_CP_REGS(6)
+ LOAD_CP_REGS(7)
+
+ .align 4
+.Lcp_regs_jump_table:
+ CP_REGS_TAB(0)
+ CP_REGS_TAB(1)
+ CP_REGS_TAB(2)
+ CP_REGS_TAB(3)
+ CP_REGS_TAB(4)
+ CP_REGS_TAB(5)
+ CP_REGS_TAB(6)
+ CP_REGS_TAB(7)
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor)
+
+ s32i a3, a2, PT_AREG3
+
+#ifdef CONFIG_SMP
+ /*
+ * Check if any coprocessor context is live on another CPU
+ * and if so go through the C-level coprocessor exception handler
+ * to flush it to memory.
+ */
+ GET_THREAD_INFO (a0, a2)
+ l32i a3, a0, THREAD_CPENABLE
+ beqz a3, .Lload_local
+
+ /*
+ * Pairs with smp_wmb in local_coprocessor_release_all
+ * and with both memws below.
+ */
+ memw
+ l32i a3, a0, THREAD_CPU
+ l32i a0, a0, THREAD_CP_OWNER_CPU
+ beq a0, a3, .Lload_local
+
+ rsr a0, ps
+ l32i a3, a2, PT_AREG3
+ bbci.l a0, PS_UM_BIT, 1f
+ call0 user_exception
+1: call0 kernel_exception
+#endif
+
+ /* Save remaining registers a1-a3 and SAR */
+
+.Lload_local:
+ rsr a3, sar
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_SAR
+ mov a1, a2
+ rsr a2, depc
+ s32i a2, a1, PT_AREG2
+
+ /* The hal macros require up to 4 temporary registers. We use a3..a6. */
+
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+
+ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+ rsr a3, exccause
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+ /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+ ssl a3 # SAR: 32 - coprocessor_number
+ movi a2, 1
+ rsr a0, cpenable
+ sll a2, a2
+ or a0, a0, a2
+ wsr a0, cpenable
+ rsync
+
+ /* Get coprocessor save/load table entry (a7). */
+
+ movi a7, .Lcp_regs_jump_table
+ addx8 a7, a3, a7
+ addx4 a7, a3, a7
+
+ /* Retrieve previous owner (a8). */
+
+ rsr a0, excsave1 # exc_table
+ addx4 a0, a3, a0 # entry for CP
+ l32i a8, a0, EXC_TABLE_COPROCESSOR_OWNER
+
+ /* Set new owner (a9). */
+
+ GET_THREAD_INFO (a9, a1)
+ l32i a4, a9, THREAD_CPU
+ s32i a9, a0, EXC_TABLE_COPROCESSOR_OWNER
+ s32i a4, a9, THREAD_CP_OWNER_CPU
+
+ /*
+ * Enable coprocessor for the new owner. (a2 = 1 << CP number)
+ * This can be done before loading context into the coprocessor.
+ */
+ l32i a4, a9, THREAD_CPENABLE
+ or a4, a4, a2
+
+ /*
+ * Make sure THREAD_CP_OWNER_CPU is in memory before updating
+ * THREAD_CPENABLE
+ */
+ memw # (2)
+ s32i a4, a9, THREAD_CPENABLE
+
+ beqz a8, 1f # skip 'save' if no previous owner
+
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+ l32i a10, a8, THREAD_CPENABLE
+ xor a10, a10, a2
+
+ /* Get context save area and call save routine. */
+
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_SAVE
+ add a2, a2, a8
+ callx0 a3
+
+ /*
+ * Make sure coprocessor context and THREAD_CP_OWNER_CPU are in memory
+ * before updating THREAD_CPENABLE
+ */
+ memw # (3)
+ s32i a10, a8, THREAD_CPENABLE
+1:
+ /* Get context save area and call load routine. */
+
+ l32i a2, a7, CP_REGS_TAB_OFFSET
+ l32i a3, a7, CP_REGS_TAB_LOAD
+ add a2, a2, a9
+ callx0 a3
+
+ /* Restore all registers and return from exception handler. */
+
+ l32i a10, a1, PT_AREG10
+ l32i a9, a1, PT_AREG9
+ l32i a8, a1, PT_AREG8
+ l32i a7, a1, PT_AREG7
+ l32i a6, a1, PT_AREG6
+ l32i a5, a1, PT_AREG5
+ l32i a4, a1, PT_AREG4
+
+ l32i a0, a1, PT_SAR
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ wsr a0, sar
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+
+ rfe
+
+ENDPROC(fast_coprocessor)
+
+ .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ * a2 a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+ abi_entry_default
+
+ movi a4, .Lcp_regs_jump_table
+ addx8 a4, a3, a4
+ addx4 a3, a3, a4
+ l32i a4, a3, CP_REGS_TAB_SAVE
+ beqz a4, 1f
+ l32i a3, a3, CP_REGS_TAB_OFFSET
+ add a2, a2, a3
+ mov a7, a0
+ callx0 a4
+ mov a0, a7
+1:
+ abi_ret_default
+
+ENDPROC(coprocessor_flush)
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 000000000..272fff587
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,2264 @@
+/*
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+#include <variant/tie-asm.h>
+
+/*
+ * Macro to find first bit set in WINDOWBASE from the left + 1
+ *
+ * 100....0 -> 1
+ * 010....0 -> 2
+ * 000....1 -> WSBITS
+ */
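
A hedged C rendition of what ffs_ws computes (illustration only; the macro below uses NSA when available and a branch ladder otherwise):

/* Illustration only: 1-based index of the highest set bit, counted from
 * the left of a wsbits-wide mask.
 */
static inline unsigned ffs_ws_c(unsigned mask, unsigned wsbits)
{
	unsigned bit;

	for (bit = 1; bit <= wsbits; bit++)
		if (mask & (1u << (wsbits - bit)))
			return bit;
	return wsbits + 1;	/* mask == 0, which does not happen in practice */
}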
+
+ .macro ffs_ws bit mask
+
+#if XCHAL_HAVE_NSA
+ nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
+ addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
+#else
+ movi \bit, WSBITS
+#if WSBITS > 16
+ _bltui \mask, 0x10000, 99f
+ addi \bit, \bit, -16
+ extui \mask, \mask, 16, 16
+#endif
+#if WSBITS > 8
+99: _bltui \mask, 0x100, 99f
+ addi \bit, \bit, -8
+ srli \mask, \mask, 8
+#endif
+99: _bltui \mask, 0x10, 99f
+ addi \bit, \bit, -4
+ srli \mask, \mask, 4
+99: _bltui \mask, 0x4, 99f
+ addi \bit, \bit, -2
+ srli \mask, \mask, 2
+99: _bltui \mask, 0x2, 99f
+ addi \bit, \bit, -1
+99:
+
+#endif
+ .endm
+
+
+ .macro irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ rsr \flags, ps
+ extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei \tmp, LOCKLEVEL, 99f
+ rsil \tmp, LOCKLEVEL
+99:
+#else
+ movi \tmp, LOCKLEVEL
+ rsr \flags, ps
+ or \flags, \flags, \tmp
+ xsr \flags, ps
+ rsync
+#endif
+#else
+ rsil \flags, LOCKLEVEL
+#endif
+ .endm
+
+/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
+
+/*
+ * First-level exception handler for user exceptions.
+ * Save some special registers, extra states and all registers in the AR
+ * register file that were in use in the user task, and jump to the common
+ * exception code.
+ * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
+ * save them for kernel exceptions).
+ *
+ * Entry condition for user_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _user_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
+ */
+ .literal_position
+
+ENTRY(user_exception)
+
+ /* Save a1, a2, a3, and set SP. */
+
+ rsr a0, depc
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _user_exception
+_user_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ wsr a2, depc # terminate user stack trace with 0
+ rsr a3, sar
+ xsr a2, icountlevel
+ s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
+
+#if XCHAL_HAVE_THREADPTR
+ rur a2, threadptr
+ s32i a2, a1, PT_THREADPTR
+#endif
+
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+#if defined(USER_SUPPORT_WINDOWED)
+ rsr a2, windowbase
+ rsr a3, windowstart
+ ssr a2
+ s32i a2, a1, PT_WINDOWBASE
+ s32i a3, a1, PT_WINDOWSTART
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for restoring registers
+#else
+ movi a2, 0
+ movi a3, 1
+ s32i a2, a1, PT_WINDOWBASE
+ s32i a3, a1, PT_WINDOWSTART
+ s32i a3, a1, PT_WMASK
+#endif
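
The ssr/slli/src/srli sequence in the windowed branch above is a rotate: it turns WINDOWSTART so the bit for the current frame sits at bit 0. A hedged C sketch of the same rotation (illustration only, assuming WSBITS <= 16):

/* Illustration only: rotate a wsbits-wide windowstart right by windowbase,
 * so xxwww1yyyy becomes yyyyxxwww1 with the current frame's bit at bit 0.
 */
static inline unsigned rotate_windowstart(unsigned ws, unsigned wb, unsigned wsbits)
{
	unsigned doubled = (ws << wsbits) | ws;		/* ws:ws, like the src funnel shift */

	return (doubled >> wb) & ((1u << wsbits) - 1);
}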
+
+ /* Save only live registers. */
+
+UABI_W _bbsi.l a2, 1, .Lsave_window_registers
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+UABI_W _bbsi.l a2, 2, .Lsave_window_registers
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+UABI_W _bbsi.l a2, 3, .Lsave_window_registers
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+
+#if defined(USER_SUPPORT_WINDOWED)
+ /* If only one frame is valid, skip saving regs. */
+
+ beqi a2, 1, common_exception
+
+ /* Save the remaining registers.
+ * We have to save all registers up to the first '1' from
+ * the right, except the current frame (bit 0).
+ * Assume a2 is: 001001000110001
+ * All register frames starting from the top field to the marked '1'
+ * must be saved.
+ */
+.Lsave_window_registers:
+ addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
+ neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
+ and a3, a3, a2 # max. only one bit is set
+
+ /* Find number of frames to save */
+
+ ffs_ws a0, a3 # number of frames to the '1' from left
+
+ /* Store information into WMASK:
+ * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
+ * bits 4...: number of valid 4-register frames
+ */
+
+ slli a3, a0, 4 # number of frames to save in bits 8..4
+ extui a2, a2, 0, 4 # mask for the first 16 registers
+ or a2, a3, a2
+ s32i a2, a1, PT_WMASK # needed when we restore the reg-file
+
+ /* Save 4 registers at a time */
+
+1: rotw -1
+ s32i a0, a5, PT_AREG_END - 16
+ s32i a1, a5, PT_AREG_END - 12
+ s32i a2, a5, PT_AREG_END - 8
+ s32i a3, a5, PT_AREG_END - 4
+ addi a0, a4, -1
+ addi a1, a5, -16
+ _bnez a0, 1b
+
+ /* WINDOWBASE still in SAR! */
+
+ rsr a2, sar # original WINDOWBASE
+ movi a3, 1
+ ssl a2
+ sll a3, a3
+ wsr a3, windowstart # set corresponding WINDOWSTART bit
+ wsr a2, windowbase # and WINDOWSTART
+ rsync
+
+ /* We are back to the original stack pointer (a1) */
+#endif
+ /* Now, jump to the common exception handler. */
+
+ j common_exception
+
+ENDPROC(user_exception)
+
+/*
+ * First-level exit handler for kernel exceptions
+ * Save special registers and the live window frame.
+ * Note: Even though we change the stack pointer, we don't have to do a
+ * MOVSP here, as we do that when we return from the exception.
+ * (See comment in the kernel exception exit code)
+ *
+ * Entry condition for kernel_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _kernel_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(kernel_exception)
+
+ /* Save a1, a2, a3, and set SP. */
+
+ rsr a0, depc # get a2
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _kernel_exception
+_kernel_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ rsr a3, sar
+ xsr a2, icountlevel
+ s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+ rsr a2, windowbase # don't need to save these, we only
+ rsr a3, windowstart # need shifted windowstart: windowmask
+ ssr a2
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
+#endif
+
+ /* Save only the live window-frame */
+
+KABI_W _bbsi.l a2, 1, 1f
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+KABI_W _bbsi.l a2, 2, 1f
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+KABI_W _bbsi.l a2, 3, 1f
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+
+#ifdef __XTENSA_WINDOWED_ABI__
+ _bnei a2, 1, 1f
+ /* Copy spill slots of a0 and a1 to imitate movsp
+ * in order to keep exception stack continuous
+ */
+ l32i a3, a1, PT_KERNEL_SIZE
+ l32i a0, a1, PT_KERNEL_SIZE + 4
+ s32e a3, a1, -16
+ s32e a0, a1, -12
+#endif
+1:
+ l32i a0, a1, PT_AREG0 # restore saved a0
+ wsr a0, depc
+
+/*
+ * This is the common exception handler.
+ * We get here from the user exception handler or simply by falling through
+ * from the kernel exception handler.
+ * Save the remaining special registers, switch to kernel mode, and jump
+ * to the second-level exception handler.
+ *
+ */
+
+common_exception:
+
+ /* Save some registers, disable loops and clear the syscall flag. */
+
+ rsr a2, debugcause
+ rsr a3, epc1
+ s32i a2, a1, PT_DEBUGCAUSE
+ s32i a3, a1, PT_PC
+
+ movi a2, NO_SYSCALL
+ rsr a3, excvaddr
+ s32i a2, a1, PT_SYSCALL
+ movi a2, 0
+ s32i a3, a1, PT_EXCVADDR
+#if XCHAL_HAVE_LOOPS
+ xsr a2, lcount
+ s32i a2, a1, PT_LCOUNT
+#endif
+
+#if XCHAL_HAVE_EXCLUSIVE
+ /* Clear exclusive access monitor set by interrupted code */
+ clrex
+#endif
+
+ /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+ rsr a2, exccause
+ movi a3, 0
+ rsr a0, excsave1
+ s32i a2, a1, PT_EXCCAUSE
+ s32i a3, a0, EXC_TABLE_FIXUP
+
+ /* All unrecoverable state is saved on the stack now, and a1 is valid.
+ * Now we can allow exceptions again. In case we've got an interrupt,
+ * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
+ * otherwise it's left unchanged.
+ *
+ * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+ */
+
+ rsr a3, ps
+ s32i a3, a1, PT_PS # save ps
+
+#if XTENSA_FAKE_NMI
+ /* Correct PS needs to be saved in the PT_PS:
+ * - in case of exception or level-1 interrupt it's in the PS,
+ * and is already saved.
+ * - in case of medium level interrupt it's in the excsave2.
+ */
+ movi a0, EXCCAUSE_MAPPED_NMI
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beq a2, a0, .Lmedium_level_irq
+ bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+ beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+ rsr a0, excsave2
+ s32i a0, a1, PT_PS # save medium-level interrupt ps
+ bgei a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+ movi a3, LOCKLEVEL
+
+.Lexception:
+KABI_W movi a0, PS_WOE_MASK
+KABI_W or a3, a3, a0
+#else
+ addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a0, LOCKLEVEL
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ # a3 = PS.INTLEVEL
+ moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
+KABI_W movi a2, PS_WOE_MASK
+KABI_W or a3, a3, a2
+#endif
+
+ /* restore return address (or 0 if return to userspace) */
+ rsr a0, depc
+ wsr a3, ps
+ rsync # PS.WOE => rsync => overflow
+
+ /* Save lbeg, lend */
+#if XCHAL_HAVE_LOOPS
+ rsr a4, lbeg
+ rsr a3, lend
+ s32i a4, a1, PT_LBEG
+ s32i a3, a1, PT_LEND
+#endif
+
+ /* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ rsr a3, scompare1
+ s32i a3, a1, PT_SCOMPARE1
+#endif
+
+ /* Save optional registers. */
+
+ save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ rsr abi_tmp0, ps
+ extui abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beqz abi_tmp0, 1f
+ abi_call trace_hardirqs_off
+1:
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ l32i abi_tmp0, a1, PT_PS
+ bbci.l abi_tmp0, PS_UM_BIT, 1f
+ abi_call user_exit_callable
+1:
+#endif
+
+ /* Go to second-level dispatcher. Set up parameters to pass to the
+ * exception handler and call the exception handler.
+ */
+
+ l32i abi_arg1, a1, PT_EXCCAUSE # pass EXCCAUSE
+ rsr abi_tmp0, excsave1
+ addx4 abi_tmp0, abi_arg1, abi_tmp0
+ l32i abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT # load handler
+ mov abi_arg0, a1 # pass stack frame
+
+ /* Call the second-level handler */
+
+ abi_callx abi_tmp0
+
+ /* Jump here for exception exit */
+ .global common_exception_return
+common_exception_return:
+
+#if XTENSA_FAKE_NMI
+ l32i abi_tmp0, a1, PT_EXCCAUSE
+ movi abi_tmp1, EXCCAUSE_MAPPED_NMI
+ l32i abi_saved1, a1, PT_PS
+ beq abi_tmp0, abi_tmp1, .Lrestore_state
+#endif
+.Ltif_loop:
+ irq_save abi_tmp0, abi_tmp1
+#ifdef CONFIG_TRACE_IRQFLAGS
+ abi_call trace_hardirqs_off
+#endif
+
+ /* Jump if we are returning from kernel exceptions. */
+
+ l32i abi_saved1, a1, PT_PS
+ GET_THREAD_INFO(abi_tmp0, a1)
+ l32i abi_saved0, abi_tmp0, TI_FLAGS
+ _bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
+
+ /* Specific to a user exception exit:
+ * We need to check some flags for signal handling and rescheduling,
+ * and have to restore WB and WS, extra states, and all registers
+ * in the register file that were in use in the user task.
+ * Note that we don't disable interrupts here.
+ */
+
+ _bbsi.l abi_saved0, TIF_NEED_RESCHED, .Lresched
+ movi abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+ bnone abi_saved0, abi_tmp0, .Lexit_tif_loop_user
+
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
+
+ /* Call do_signal() */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ abi_call trace_hardirqs_on
+#endif
+ rsil abi_tmp0, 0
+ mov abi_arg0, a1
+ abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
+ j .Ltif_loop
+
+.Lresched:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ abi_call trace_hardirqs_on
+#endif
+ rsil abi_tmp0, 0
+ abi_call schedule # void schedule (void)
+ j .Ltif_loop
+
+.Lexit_tif_loop_kernel:
+#ifdef CONFIG_PREEMPTION
+ _bbci.l abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
+
+ /* Check current_thread_info->preempt_count */
+
+ l32i abi_tmp1, abi_tmp0, TI_PRE_COUNT
+ bnez abi_tmp1, .Lrestore_state
+ abi_call preempt_schedule_irq
+#endif
+ j .Lrestore_state
+
+.Lexit_tif_loop_user:
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+ abi_call user_enter_callable
+#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ _bbci.l abi_saved0, TIF_DB_DISABLED, 1f
+ abi_call restore_dbreak
+1:
+#endif
+#ifdef CONFIG_DEBUG_TLB_SANITY
+ l32i abi_tmp0, a1, PT_DEPC
+ bgeui abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
+ abi_call check_tlb_sanity
+#endif
+
+.Lrestore_state:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ extui abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei abi_tmp0, LOCKLEVEL, 1f
+ abi_call trace_hardirqs_on
+1:
+#endif
+ /*
+ * Restore optional registers.
+ * abi_arg* are used as temporary registers here.
+ */
+
+ load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
+
+ /* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ l32i abi_tmp0, a1, PT_SCOMPARE1
+ wsr abi_tmp0, scompare1
+#endif
+ wsr abi_saved1, ps /* disable interrupts */
+ _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit
+
+user_exception_exit:
+
+ /* Restore the state of the task and return from the exception. */
+
+#if defined(USER_SUPPORT_WINDOWED)
+ /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
+
+ l32i a2, a1, PT_WINDOWBASE
+ l32i a3, a1, PT_WINDOWSTART
+ wsr a1, depc # use DEPC as temp storage
+ wsr a3, windowstart # restore WINDOWSTART
+ ssr a2 # preserve user's WB in the SAR
+ wsr a2, windowbase # switch to user's saved WB
+ rsync
+ rsr a1, depc # restore stack pointer
+ l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
+ rotw -1 # we restore a4..a7
+ _bltui a6, 16, .Lclear_regs # only have to restore current window?
+
+ /* The working registers are a0 and a3. We are restoring to
+ * a4..a7. Be careful not to destroy what we have just restored.
+ * Note: wmask has the format YYYYM:
+ * Y: number of registers saved in groups of 4
+ * M: 4 bit mask of first 16 registers
+ */
+
+ mov a2, a6
+ mov a3, a5
+
+1: rotw -1 # a0..a3 become a4..a7
+ addi a3, a7, -4*4 # next iteration
+ addi a2, a6, -16 # decrementing Y in WMASK
+ l32i a4, a3, PT_AREG_END + 0
+ l32i a5, a3, PT_AREG_END + 4
+ l32i a6, a3, PT_AREG_END + 8
+ l32i a7, a3, PT_AREG_END + 12
+ _bgeui a2, 16, 1b
+
+ /* Clear unrestored registers (don't leak anything to user-land) */
+
+.Lclear_regs:
+ rsr a0, windowbase
+ rsr a3, sar
+ sub a3, a0, a3
+ beqz a3, 2f
+ extui a3, a3, 0, WBBITS
+
+1: rotw -1
+ addi a3, a7, -1
+ movi a4, 0
+ movi a5, 0
+ movi a6, 0
+ movi a7, 0
+ bgei a3, 1, 1b
+
+ /* We are back where we were when we started.
+ * Note: a2 still contains WMASK (if we've returned to the original
+ * frame where we had loaded a2), or at least the lower 4 bits
+ * (if we have restored WSBITS-1 frames).
+ */
+2:
+#else
+ movi a2, 1
+#endif
+#if XCHAL_HAVE_THREADPTR
+ l32i a3, a1, PT_THREADPTR
+ wur a3, threadptr
+#endif
+
+ j common_exception_exit
+
+ /* This is the kernel exception exit.
+ * We avoided doing a MOVSP when we entered the exception, but we
+ * have to do it here.
+ */
+
+kernel_exception_exit:
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ /* Check if we have to do a movsp.
+ *
+ * We only have to do a movsp if the previous window-frame has
+ * been spilled to the *temporary* exception stack instead of the
+ * task's stack. This is the case if the corresponding bit in
+ * WINDOWSTART for the previous window-frame was set before
+ * (not spilled) but is zero now (spilled).
+ * If this bit is zero, all other bits except the one for the
+ * current window frame are also zero. So, we can use a simple test:
+ * 'and' WINDOWSTART and WINDOWSTART-1:
+ *
+ * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
+ *
+ * The result is zero only if one bit was set.
+ *
+ * (Note: We might have gone through several task switches before
+ * we come back to the current task, so WINDOWBASE might be
+ * different from the time the exception occurred.)
+ */
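
The WINDOWSTART test described above boils down to the classic single-bit check; a hedged C equivalent (illustration only):

/* Illustration only: ws & (ws - 1) is zero exactly when at most one bit is set. */
static inline int only_current_frame_live(unsigned windowstart)
{
	return (windowstart & (windowstart - 1)) == 0;
}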
+
+ /* Test WINDOWSTART before and after the exception.
+ * We actually have WMASK, so we only have to test if it is 1 or not.
+ */
+
+ l32i a2, a1, PT_WMASK
+ _beqi a2, 1, common_exception_exit # Spilled before exception,jump
+
+ /* Test WINDOWSTART now. If spilled, do the movsp */
+
+ rsr a3, windowstart
+ addi a0, a3, -1
+ and a3, a3, a0
+ _bnez a3, common_exception_exit
+
+ /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
+
+ addi a0, a1, -16
+ l32i a3, a0, 0
+ l32i a4, a0, 4
+ s32i a3, a1, PT_KERNEL_SIZE + 0
+ s32i a4, a1, PT_KERNEL_SIZE + 4
+ l32i a3, a0, 8
+ l32i a4, a0, 12
+ s32i a3, a1, PT_KERNEL_SIZE + 8
+ s32i a4, a1, PT_KERNEL_SIZE + 12
+
+ /* Common exception exit.
+ * We restore the special register and the current window frame, and
+ * return from the exception.
+ *
+ * Note: We expect a2 to hold PT_WMASK
+ */
+#else
+ movi a2, 1
+#endif
+
+common_exception_exit:
+
+ /* Restore address registers. */
+
+ _bbsi.l a2, 1, 1f
+ l32i a4, a1, PT_AREG4
+ l32i a5, a1, PT_AREG5
+ l32i a6, a1, PT_AREG6
+ l32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ l32i a8, a1, PT_AREG8
+ l32i a9, a1, PT_AREG9
+ l32i a10, a1, PT_AREG10
+ l32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ l32i a12, a1, PT_AREG12
+ l32i a13, a1, PT_AREG13
+ l32i a14, a1, PT_AREG14
+ l32i a15, a1, PT_AREG15
+
+ /* Restore PC, SAR */
+
+1: l32i a2, a1, PT_PC
+ l32i a3, a1, PT_SAR
+ wsr a2, epc1
+ wsr a3, sar
+
+ /* Restore LBEG, LEND, LCOUNT */
+#if XCHAL_HAVE_LOOPS
+ l32i a2, a1, PT_LBEG
+ l32i a3, a1, PT_LEND
+ wsr a2, lbeg
+ l32i a2, a1, PT_LCOUNT
+ wsr a3, lend
+ wsr a2, lcount
+#endif
+
+ /* We control single stepping through the ICOUNTLEVEL register. */
+
+ l32i a2, a1, PT_ICOUNTLEVEL
+ movi a3, -2
+ wsr a2, icountlevel
+ wsr a3, icount
+
+ /* Check if it was double exception. */
+
+ l32i a0, a1, PT_DEPC
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore a0...a3 and return */
+
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfe
+
+1: wsr a0, depc
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfde
+
+ENDPROC(kernel_exception)
+
+/*
+ * Debug exception handler.
+ *
+ * Currently, we don't support KGDB, so only user applications can be debugged.
+ *
+ * When we get here, a0 is trashed and saved to excsave[debuglevel]
+ */
+
+ .literal_position
+
+ENTRY(debug_exception)
+
+ rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
+ bbsi.l a0, PS_EXCM_BIT, .Ldebug_exception_in_exception # exception mode
+
+ /* Set EPC1 and EXCCAUSE */
+
+ wsr a2, depc # save a2 temporarily
+ rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
+ wsr a2, epc1
+
+ movi a2, EXCCAUSE_MAPPED_DEBUG
+ wsr a2, exccause
+
+ /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
+
+ movi a2, 1 << PS_EXCM_BIT
+ or a2, a0, a2
+ wsr a2, ps
+
+ /* Switch to kernel/user stack, restore jump vector, and save a0 */
+
+ bbsi.l a2, PS_UM_BIT, .Ldebug_exception_user # jump if user mode
+ addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack
+
+.Ldebug_exception_continue:
+ l32i a0, a3, DT_DEBUG_SAVE
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG0
+ movi a0, 0
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ xsr a0, depc
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_AREG2
+ mov a1, a2
+
+ /* Debug exception is handled as an exception, so interrupts will
+ * likely be enabled in the common exception handler. Disable
+ * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
+ * meaning.
+ */
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
+ GET_THREAD_INFO(a2, a1)
+ l32i a3, a2, TI_PRE_COUNT
+ addi a3, a3, 1
+ s32i a3, a2, TI_PRE_COUNT
+#endif
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, _user_exception
+ j _kernel_exception
+
+.Ldebug_exception_user:
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
+ j .Ldebug_exception_continue
+
+.Ldebug_exception_in_exception:
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Debug exception while in exception mode. This may happen when the
+ * window overflow/underflow handler or a fast exception handler hits
+ * a data breakpoint, in which case we save and disable all data
+ * breakpoints, single-step the faulting instruction and restore the
+ * data breakpoints.
+ */
+
+ bbci.l a0, PS_UM_BIT, .Ldebug_exception_in_exception # jump if kernel mode
+
+ rsr a0, debugcause
+ bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
+
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ l32i a0, a3, DT_DBREAKC_SAVE + _index * 4
+ wsr a0, SREG_DBREAKC + _index
+ .set _index, _index + 1
+ .endr
+
+ l32i a0, a3, DT_ICOUNT_LEVEL_SAVE
+ wsr a0, icountlevel
+
+ l32i a0, a3, DT_ICOUNT_SAVE
+ xsr a0, icount
+
+ l32i a0, a3, DT_DEBUG_SAVE
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ rfi XCHAL_DEBUGLEVEL
+
+.Ldebug_save_dbreak:
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ movi a0, 0
+ xsr a0, SREG_DBREAKC + _index
+ s32i a0, a3, DT_DBREAKC_SAVE + _index * 4
+ .set _index, _index + 1
+ .endr
+
+ movi a0, XCHAL_EXCM_LEVEL + 1
+ xsr a0, icountlevel
+ s32i a0, a3, DT_ICOUNT_LEVEL_SAVE
+
+ movi a0, 0xfffffffe
+ xsr a0, icount
+ s32i a0, a3, DT_ICOUNT_SAVE
+
+ l32i a0, a3, DT_DEBUG_SAVE
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ rfi XCHAL_DEBUGLEVEL
+#else
+ /* Debug exception while in exception mode. Should not happen. */
+ j .Ldebug_exception_in_exception // FIXME!!
+#endif
+
+ENDPROC(debug_exception)
+
+/*
+ * We get here in case of an unrecoverable exception.
+ * The only thing we can do is to be nice and print a panic message.
+ * We only produce a single stack frame for panic, so ???
+ *
+ *
+ * Entry conditions:
+ *
+ * - a0 contains the caller address; original value saved in excsave1.
+ * - the original a0 contains a valid return address (backtrace) or 0.
+ * - a2 contains a valid stackpointer
+ *
+ * Notes:
+ *
+ * - If the stack pointer could be invalid, the caller has to setup a
+ * dummy stack pointer (e.g. the stack of the init_task)
+ *
+ * - If the return address could be invalid, the caller has to set it
+ * to 0, so the backtrace would stop.
+ *
+ */
+ .align 4
+unrecoverable_text:
+ .ascii "Unrecoverable error in exception handler\0"
+
+ .literal_position
+
+ENTRY(unrecoverable_exception)
+
+#if XCHAL_HAVE_WINDOWED
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, windowstart
+ wsr a1, windowbase
+ rsync
+#endif
+
+ movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ movi a1, init_task
+ movi a0, 0
+ addi a1, a1, PT_REGS_OFFSET
+
+ movi abi_arg0, unrecoverable_text
+ abi_call panic
+
+1: j 1b
+
+ENDPROC(unrecoverable_exception)
+
+/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+ __XTENSA_HANDLER
+ .literal_position
+
+#ifdef SUPPORT_WINDOWED
+/*
+ * Fast-handler for alloca exceptions
+ *
+ * The ALLOCA handler is entered when user code executes the MOVSP
+ * instruction and the caller's frame is not in the register file.
+ *
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact, this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow exception
+ * of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed and this time, since the next window frame is in the
+ * active AR registers, it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss, the page will get mapped and
+ * the partial WindowUnderflow will be handled in the double exception
+ * handler.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
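+
+/*
+ * At the C level this corresponds, for instance, to a function that uses
+ * alloca() or a variable-length array: the compiler adjusts the stack with
+ * MOVSP, and if the caller's frame has already been spilled to the stack,
+ * that MOVSP traps and ends up here.
+ */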
+
+ENTRY(fast_alloca)
+ rsr a0, windowbase
+ rotw -1
+ rsr a2, ps
+ extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+ xor a3, a3, a4
+ l32i a4, a6, PT_AREG0
+ l32i a1, a6, PT_DEPC
+ rsr a6, depc
+ wsr a1, depc
+ slli a3, a3, PS_OWB_SHIFT
+ xor a2, a2, a3
+ wsr a2, ps
+ rsync
+
+ _bbci.l a4, 31, 4f
+ rotw -1
+ _bbci.l a8, 30, 8f
+ rotw -1
+ j _WindowUnderflow12
+8: j _WindowUnderflow8
+4: j _WindowUnderflow4
+ENDPROC(fast_alloca)
+#endif
+
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+/*
+ * fast illegal instruction handler.
+ *
+ * This is used to fix up user PS.WOE on the exception caused
+ * by the first opcode related to register window. If PS.WOE is
+ * already set it goes directly to the common user exception handler.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ */
+
+ENTRY(fast_illegal_instruction_user)
+
+ rsr a0, ps
+ bbsi.l a0, PS_WOE_BIT, 1f
+ s32i a3, a2, PT_AREG3
+ movi a3, PS_WOE_MASK
+ or a0, a0, a3
+ wsr a0, ps
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ GET_THREAD_INFO(a3, a2)
+ rsr a0, epc1
+ s32i a0, a3, TI_PS_WOE_FIX_ADDR
+#endif
+ l32i a3, a2, PT_AREG3
+ l32i a0, a2, PT_AREG0
+ rsr a2, depc
+ rfe
+1:
+ call0 user_exception
+
+ENDPROC(fast_illegal_instruction_user)
+#endif
+
+ /*
+ * fast system calls.
+ *
+ * WARNING: The kernel doesn't save the entire user context before
+ * handling a fast system call. These functions are small and short,
+ * usually offering some functionality not available to user tasks.
+ *
+ * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ */
+
+ENTRY(fast_syscall_user)
+
+ /* Skip syscall. */
+
+ rsr a0, epc1
+ addi a0, a0, 3
+ wsr a0, epc1
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+ rsr a0, depc # get syscall-nr
+ _beqz a0, fast_syscall_spill_registers
+ _beqi a0, __NR_xtensa, fast_syscall_xtensa
+
+ call0 user_exception
+
+ENDPROC(fast_syscall_user)
+
+ENTRY(fast_syscall_unrecoverable)
+
+ /* Restore all states. */
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, depc # restore a2, depc
+
+ wsr a0, excsave1
+ call0 unrecoverable_exception
+
+ENDPROC(fast_syscall_unrecoverable)
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ *
+ * Entry condition:
+ *
+ * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in a0 and DEPC
+ * a3: a3
+ * a4..a15: unchanged
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: we don't have to save a2; a2 holds the return value
+ */
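+
+/*
+ * As a rough illustration (not part of this file): user space reaches these
+ * operations through the xtensa-specific syscall. Assuming the SYS_XTENSA_*
+ * codes and __NR_xtensa from the uapi headers, a compare-and-swap could
+ * look roughly like this (the helper name is made up; it returns 1 when
+ * *ptr equalled oldval and was replaced, 0 otherwise):
+ *
+ *	static int xtensa_cmp_swp(int *ptr, int oldval, int newval)
+ *	{
+ *		return syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
+ *			       ptr, oldval, newval);
+ *	}
+ */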
+
+ .literal_position
+
+#ifdef CONFIG_FAST_SYSCALL_XTENSA
+
+ENTRY(fast_syscall_xtensa)
+
+ s32i a7, a2, PT_AREG7 # we need an additional register
+ movi a7, 4 # sizeof(unsigned int)
+ access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
+
+ _bgeui a6, SYS_XTENSA_COUNT, .Lill
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
+
+ /* Fall through for ATOMIC_CMP_SWP. */
+
+.Lswp: /* Atomic compare and swap */
+
+EX(.Leac) l32i a0, a3, 0 # read old value
+ bne a0, a4, 1f # same as old value? jump
+EX(.Leac) s32i a5, a3, 0 # different, modify value
+ l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 1 # and return 1
+ rfe
+
+1: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 0 # return 0 (note that we cannot set
+ rfe
+
+.Lnswp: /* Atomic set, add, and exg_add. */
+
+EX(.Leac) l32i a7, a3, 0 # orig
+ addi a6, a6, -SYS_XTENSA_ATOMIC_SET
+ add a0, a4, a7 # + arg
+ moveqz a0, a4, a6 # set
+ addi a6, a6, SYS_XTENSA_ATOMIC_SET
+EX(.Leac) s32i a0, a3, 0 # write new value
+
+ mov a0, a2
+ mov a2, a7
+ l32i a7, a0, PT_AREG7 # restore a7
+ l32i a0, a0, PT_AREG0 # restore a0
+ rfe
+
+.Leac: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EFAULT
+ rfe
+
+.Lill: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EINVAL
+ rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#else /* CONFIG_FAST_SYSCALL_XTENSA */
+
+ENTRY(fast_syscall_xtensa)
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -ENOSYS
+ rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#endif /* CONFIG_FAST_SYSCALL_XTENSA */
+
+
+/* fast_syscall_spill_registers.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
+ */
+
+#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
+ defined(USER_SUPPORT_WINDOWED)
+
+ENTRY(fast_syscall_spill_registers)
+
+ /* Register a FIXUP handler (pass current wb as a parameter) */
+
+ xsr a3, excsave1
+ movi a0, fast_syscall_spill_registers_fixup
+ s32i a0, a3, EXC_TABLE_FIXUP
+ rsr a0, windowbase
+ s32i a0, a3, EXC_TABLE_PARAM
+ xsr a3, excsave1 # restore a3 and excsave_1
+
+ /* Save a3, a4 and SAR on stack. */
+
+ rsr a0, sar
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_SAR
+
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+
+ s32i a4, a2, PT_AREG4
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+ s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
+ s32i a15, a2, PT_AREG15
+
+ /*
+ * Rotate ws so that the current windowbase is at bit 0.
+ * Assume ws = xxxwww1yy (www1 current window frame).
+	 * Rotate ws right so that a3 = yyxxxwww1.
+ */
+
+ rsr a0, windowbase
+ rsr a3, windowstart # a3 = xxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
+ srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
+
+	/* We are done if there are no frames other than the current one. */
+
+ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
+ movi a0, (1 << (WSBITS-1))
+ _beqz a3, .Lnospill # only one active frame? jump
+
+ /* We want 1 at the top, so that we return to the current windowbase */
+
+ or a3, a3, a0 # 1yyxxxwww
+
+ /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+ wsr a3, windowstart # save shifted windowstart
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
+
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
+ movi a3, WSBITS
+ sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
+ ssr a0 # save in SAR for later.
+
+ rsr a3, windowbase
+ add a3, a3, a0
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, windowstart
+ srl a3, a3 # shift windowstart
+
+ /* WB is now just one frame below the oldest frame in the register
+ window. WS is shifted so the oldest frame is in bit 0, thus, WB
+ and WS differ by one 4-register frame. */
+
+	/* Save frames. Depending on which call was used (call4, call8, or
+	 * call12), we have to save 4, 8, or 12 registers.
+ */
+
+
+.Lloop: _bbsi.l a3, 1, .Lc4
+ _bbci.l a3, 2, .Lc12
+
+.Lc8: s32e a4, a13, -16
+ l32e a4, a5, -12
+ s32e a8, a4, -32
+ s32e a5, a13, -12
+ s32e a6, a13, -8
+ s32e a7, a13, -4
+ s32e a9, a4, -28
+ s32e a10, a4, -24
+ s32e a11, a4, -20
+ srli a11, a3, 2 # shift windowbase by 2
+ rotw 2
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc4: s32e a4, a9, -16
+ s32e a5, a9, -12
+ s32e a6, a9, -8
+ s32e a7, a9, -4
+
+ srli a7, a3, 1
+ rotw 1
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero!
+
+ /* 12-register frame (call12) */
+
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
+
+ s32e a9, a8, -44
+ s32e a10, a8, -40
+ s32e a11, a8, -36
+ s32e a12, a8, -32
+ s32e a13, a8, -28
+ s32e a14, a8, -24
+ s32e a15, a8, -20
+ srli a15, a3, 3
+
+ /* The stack pointer for a4..a7 is out of reach, so we rotate the
+ * window, grab the stackpointer, and rotate back.
+ * Alternatively, we could also use the following approach, but that
+ * makes the fixup routine much more complicated:
+ * rotw 1
+ * s32e a0, a13, -16
+ * ...
+ * rotw 2
+ */
+
+ rotw 1
+ mov a4, a13
+ rotw -1
+
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
+
+ rotw 3
+
+ _beqi a3, 1, .Lexit
+ j .Lloop
+
+.Lexit:
+
+ /* Done. Do the final rotation and set WS */
+
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
+
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_SAR
+ l32i a0, a2, PT_AREG0
+ wsr a3, sar
+ l32i a3, a2, PT_AREG3
+
+ /* Restore clobbered registers. */
+
+ l32i a4, a2, PT_AREG4
+ l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
+ l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
+ l32i a15, a2, PT_AREG15
+
+ movi a2, 0
+ rfe
+
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
+ * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+ */
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, windowstart
+ wsr a1, windowbase
+ rsync
+
+ movi a0, 0
+
+ rsr a3, excsave1
+ l32i a1, a3, EXC_TABLE_KSTK
+
+ movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
+ wsr a4, ps
+ rsync
+
+ movi abi_arg0, SIGSEGV
+ abi_call make_task_dead
+
+ /* shouldn't return, so panic */
+
+ wsr a0, excsave1
+ call0 unrecoverable_exception # should not return
+1: j 1b
+
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+ rsr a2, windowbase # get current windowbase (a2 is saved)
+ xsr a0, depc # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ *
+	 * Note: we use a3 to set the windowbase, so we take special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+ wsr a2, windowstart # set corrected windowstart
+
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+ * depc: depc (we have to return to that address)
+ * excsave_1: exctable
+ */
+
+ wsr a3, windowbase
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available
+ * depc: exception address
+ * excsave: exctable
+ * Note: This frame might be the same as above.
+ */
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ rsr a3, excsave1
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ jx a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, depc # exception address
+
+ /* Restore fixup handler. */
+
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, sar # WB is still in SAR
+ neg a3, a3
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+ rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
+#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+ENTRY(fast_syscall_spill_registers)
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -ENOSYS
+ rfe
+
+ENDPROC(fast_syscall_spill_registers)
+
+#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+#ifdef CONFIG_MMU
+/*
+ * We should never get here. Bail out!
+ */
+
+ENTRY(fast_second_level_miss_double_kernel)
+
+1:
+ call0 unrecoverable_exception # should not return
+1: j 1b
+
+ENDPROC(fast_second_level_miss_double_kernel)
+
+/* First-level entry handler for user, kernel, and double 2nd-level
+ * TLB miss exceptions. Note that for now, user and kernel miss
+ * exceptions share the same entry point and are handled identically.
+ *
+ * An old, less-efficient C version of this function used to exist.
+ * We include it below, interleaved as comments, for reference.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_second_level_miss)
+
+ /* Save a1 and a3. Note: we don't expect a double exception. */
+
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
+
+ /* We need to map the page of PTEs for the user task. Find
+ * the pointer to that page. Also, it's possible for tsk->mm
+ * to be NULL while tsk->active_mm is nonzero if we faulted on
+ * a vmalloc address. In that rare case, we must use
+ * active_mm instead to avoid a fault in this handler. See
+ *
+ * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
+ * (or search Internet on "mm vs. active_mm")
+ *
+ * if (!mm)
+ * mm = tsk->active_mm;
+ * pgd = pgd_offset (mm, regs->excvaddr);
+ * pmd = pmd_offset (pgd, regs->excvaddr);
+ * pmdval = *pmd;
+ */
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, .Lfast_second_level_miss_no_mm
+
+.Lfast_second_level_miss_continue:
+ rsr a3, excvaddr # fault address
+ _PGD_OFFSET(a0, a3, a1)
+ l32i a0, a0, 0 # read pmdval
+ beqz a0, .Lfast_second_level_miss_no_pmd
+
+ /* Read ptevaddr and convert to top of page-table page.
+ *
+ * vpnval = read_ptevaddr_register() & PAGE_MASK;
+ * vpnval += DTLB_WAY_PGTABLE;
+ * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
+ * write_dtlb_entry (pteval, vpnval);
+ *
+ * The messy computation for 'pteval' above really simplifies
+ * into the following:
+ *
+ * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
+ * | PAGE_DIRECTORY
+ */
+
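+	/* a0 = (pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK; the
+	 * extui/xor pair below clears the low PAGE_SHIFT bits without
+	 * needing a separate 32-bit mask constant.
+	 */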
+ movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
+ add a0, a0, a1 # pmdval - PAGE_OFFSET
+ extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
+ xor a0, a0, a1
+
+ movi a1, _PAGE_DIRECTORY
+ or a0, a0, a1 # ... | PAGE_DIRECTORY
+
+ /*
+ * We utilize all three wired-ways (7-9) to hold pmd translations.
+ * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+	 * This allows mapping the three most common regions to three different
+	 * DTLB ways:
+	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
+	 *  2   -> way 8	shared libraries (2000.0000)
+	 *  3   -> way 9	stack (3000.0000)
+ */
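+
+	/* The code below computes way = DTLB_WAY_PGD + (3 * region) / 4 with
+	 * just addx2 and extui, which (with DTLB_WAY_PGD == 7) maps regions
+	 * 0,1,2,3 to ways 7,7,8,9 without a table lookup.
+	 */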
+
+ extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
+ rsr a1, ptevaddr
+ addx2 a3, a3, a3 # -> 0,3,6,9
+ srli a1, a1, PAGE_SHIFT
+ extui a3, a3, 2, 2 # -> 0,0,1,2
+ slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
+ addi a3, a3, DTLB_WAY_PGD
+ add a1, a1, a3 # ... + way_number
+
+.Lfast_second_level_miss_wdtlb:
+ wdtlb a0, a1
+ dsync
+
+ /* Exit critical section. */
+.Lfast_second_level_miss_skip_wdtlb:
+ rsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a0, a2, PT_AREG0
+ l32i a1, a2, PT_AREG1
+ l32i a3, a2, PT_AREG3
+ l32i a2, a2, PT_DEPC
+
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore excsave1 and return. */
+
+ rsr a2, depc
+ rfe
+
+ /* Return from double exception. */
+
+1: xsr a2, depc
+ esync
+ rfde
+
+.Lfast_second_level_miss_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ bnez a0, .Lfast_second_level_miss_continue
+
+ /* Even more unlikely case active_mm == 0.
+ * We can get here with NMI in the middle of context_switch that
+ * touches vmalloc area.
+ */
+ movi a0, init_mm
+ j .Lfast_second_level_miss_continue
+
+.Lfast_second_level_miss_no_pmd:
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+ /* Special case for cache aliasing.
+ * We (should) only get here if a clear_user_page, copy_user_page
+ * or the aliased cache flush functions got preemptively interrupted
+ * by another task. Re-establish temporary mapping to the
+ * TLBTEMP_BASE areas.
+ */
+
+ /* We shouldn't be in a double exception */
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
+
+ /* Make sure the exception originated in the special functions */
+
+ movi a0, __tlbtemp_mapping_start
+ rsr a3, epc1
+ bltu a3, a0, .Lfast_second_level_miss_slow
+ movi a0, __tlbtemp_mapping_end
+ bgeu a3, a0, .Lfast_second_level_miss_slow
+
+ /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+ movi a3, TLBTEMP_BASE_1
+ rsr a0, excvaddr
+ bltu a0, a3, .Lfast_second_level_miss_slow
+
+ addi a1, a0, -TLBTEMP_SIZE
+ bgeu a1, a3, .Lfast_second_level_miss_slow
+
+ /* Check if we have to restore an ITLB mapping. */
+
+ movi a1, __tlbtemp_mapping_itlb
+ rsr a3, epc1
+ sub a3, a3, a1
+
+ /* Calculate VPN */
+
+ movi a1, PAGE_MASK
+ and a1, a1, a0
+
+ /* Jump for ITLB entry */
+
+ bgez a3, 1f
+
+ /* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+ extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+ add a1, a3, a1
+
+ /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+ mov a0, a6
+ movnez a0, a7, a3
+ j .Lfast_second_level_miss_wdtlb
+
+ /* ITLB entry. We only use dst in a6. */
+
+1: witlb a6, a1
+ isync
+ j .Lfast_second_level_miss_skip_wdtlb
+
+
+#endif // DCACHE_WAY_SIZE > PAGE_SIZE
+
+ /* Invalid PGD, default exception handling */
+.Lfast_second_level_miss_slow:
+
+ rsr a1, depc
+ s32i a1, a2, PT_AREG2
+ mov a1, a2
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
+ call0 _kernel_exception
+1: call0 _user_exception
+
+ENDPROC(fast_second_level_miss)
+
+/*
+ * StoreProhibitedException
+ *
+ * Update the pte and invalidate the itlb mapping for this pte.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_store_prohibited)
+
+ /* Save a1 and a3. */
+
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, .Lfast_store_no_mm
+
+.Lfast_store_continue:
+ rsr a1, excvaddr # fault address
+ _PGD_OFFSET(a0, a1, a3)
+ l32i a0, a0, 0
+ beqz a0, .Lfast_store_slow
+
+ /*
+ * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
+ * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
+ */
+
+ _PTE_OFFSET(a0, a1, a3)
+ l32i a3, a0, 0 # read pteval
+ movi a1, _PAGE_CA_INVALID
+ ball a3, a1, .Lfast_store_slow
+ bbci.l a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
+
+ movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
+ or a3, a3, a1
+ rsr a1, excvaddr
+ s32i a3, a0, 0
+
+ /* We need to flush the cache if we have page coloring. */
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ dhwb a0, 0
+#endif
+ pdtlb a0, a1
+ wdtlb a3, a0
+
+ /* Exit critical section. */
+
+ movi a0, 0
+ rsr a3, excsave1
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a3, a2, PT_AREG3
+ l32i a1, a2, PT_AREG1
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_DEPC
+
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ rsr a2, depc
+ rfe
+
+ /* Double exception. Restore FIXUP handler and return. */
+
+1: xsr a2, depc
+ esync
+ rfde
+
+.Lfast_store_no_mm:
+ l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j .Lfast_store_continue
+
+ /* If there was a problem, handle fault in C */
+.Lfast_store_slow:
+ rsr a1, excvaddr
+ pdtlb a0, a1
+ bbci.l a0, DTLB_HIT_BIT, 1f
+ idtlb a0
+1:
+ rsr a3, depc # still holds a2
+ s32i a3, a2, PT_AREG2
+ mov a1, a2
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
+ call0 _kernel_exception
+1: call0 _user_exception
+
+ENDPROC(fast_store_prohibited)
+
+#endif /* CONFIG_MMU */
+
+ .text
+/*
+ * System Calls.
+ *
+ * void system_call (struct pt_regs* regs, int exccause)
+ * a2 a3
+ */
+ .literal_position
+
+ENTRY(system_call)
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ abi_entry_default
+#elif defined(__XTENSA_CALL0_ABI__)
+ abi_entry(12)
+
+ s32i a0, sp, 0
+ s32i abi_saved0, sp, 4
+ s32i abi_saved1, sp, 8
+ mov abi_saved0, a2
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+ /* regs->syscall = regs->areg[2] */
+
+ l32i a7, abi_saved0, PT_AREG2
+ s32i a7, abi_saved0, PT_SYSCALL
+
+ GET_THREAD_INFO(a4, a1)
+ l32i abi_saved1, a4, TI_FLAGS
+ movi a4, _TIF_WORK_MASK
+ and abi_saved1, abi_saved1, a4
+ beqz abi_saved1, 1f
+
+ mov abi_arg0, abi_saved0
+ abi_call do_syscall_trace_enter
+ beqz abi_rv, .Lsyscall_exit
+ l32i a7, abi_saved0, PT_SYSCALL
+
+1:
+ /* syscall = sys_call_table[syscall_nr] */
+
+ movi a4, sys_call_table
+ movi a5, __NR_syscalls
+ movi abi_rv, -ENOSYS
+ bgeu a7, a5, 1f
+
+ addx4 a4, a7, a4
+ l32i abi_tmp0, a4, 0
+
+ /* Load args: arg0 - arg5 are passed via regs. */
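+	/* In the user-visible syscall ABI the syscall number arrives in a2
+	 * and the six arguments in a6, a3, a4, a5, a8 and a9, hence the
+	 * PT_AREG offsets used below.
+	 */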
+
+ l32i abi_arg0, abi_saved0, PT_AREG6
+ l32i abi_arg1, abi_saved0, PT_AREG3
+ l32i abi_arg2, abi_saved0, PT_AREG4
+ l32i abi_arg3, abi_saved0, PT_AREG5
+ l32i abi_arg4, abi_saved0, PT_AREG8
+ l32i abi_arg5, abi_saved0, PT_AREG9
+
+ abi_callx abi_tmp0
+
+1: /* regs->areg[2] = return_value */
+
+ s32i abi_rv, abi_saved0, PT_AREG2
+ bnez abi_saved1, 1f
+.Lsyscall_exit:
+#if defined(__XTENSA_WINDOWED_ABI__)
+ abi_ret_default
+#elif defined(__XTENSA_CALL0_ABI__)
+ l32i a0, sp, 0
+ l32i abi_saved0, sp, 4
+ l32i abi_saved1, sp, 8
+ abi_ret(12)
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+1:
+ mov abi_arg0, abi_saved0
+ abi_call do_syscall_trace_leave
+ j .Lsyscall_exit
+
+ENDPROC(system_call)
+
+/*
+ * Macro to spill live registers onto the kernel stack.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has a single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 16
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
+
+/*
+ * Task switch.
+ *
+ * struct task* _switch_to (struct task* prev, struct task* next)
+ * a2 a2 a3
+ */
+
+ENTRY(_switch_to)
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ abi_entry(XTENSA_SPILL_STACK_RESERVE)
+#elif defined(__XTENSA_CALL0_ABI__)
+ abi_entry(16)
+
+ s32i a12, sp, 0
+ s32i a13, sp, 4
+ s32i a14, sp, 8
+ s32i a15, sp, 12
+#else
+#error Unsupported Xtensa ABI
+#endif
+ mov a11, a3 # and 'next' (a3)
+
+ l32i a4, a2, TASK_THREAD_INFO
+ l32i a5, a3, TASK_THREAD_INFO
+
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+#if THREAD_RA > 1020 || THREAD_SP > 1020
+ addi a10, a2, TASK_THREAD
+ s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
+ s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
+#else
+ s32i a0, a2, THREAD_RA # save return address
+ s32i a1, a2, THREAD_SP # save stack pointer
+#endif
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ movi a6, __stack_chk_guard
+ l32i a8, a3, TASK_STACK_CANARY
+ s32i a8, a6, 0
+#endif
+
+ /* Disable ints while we manipulate the stack pointer. */
+
+ irq_save a14, a3
+ rsync
+
+ /* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ l32i a3, a5, THREAD_CPENABLE
+#ifdef CONFIG_SMP
+ beqz a3, 1f
+ memw # pairs with memw (2) in fast_coprocessor
+ l32i a6, a5, THREAD_CP_OWNER_CPU
+ l32i a7, a5, THREAD_CPU
+ beq a6, a7, 1f # load 0 into CPENABLE if current CPU is not the owner
+ movi a3, 0
+1:
+#endif
+ wsr a3, cpenable
+#endif
+
+#if XCHAL_HAVE_EXCLUSIVE
+ l32i a3, a5, THREAD_ATOMCTL8
+ getex a3
+ s32i a3, a4, THREAD_ATOMCTL8
+#endif
+
+ /* Flush register file. */
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#endif
+
+ /* Set kernel stack (and leave critical section)
+	 * Note: It's safe to set it here. The stack will not be overwritten
+ * because the kernel stack will only be loaded again after
+ * we return from kernel space.
+ */
+
+ rsr a3, excsave1 # exc_table
+ addi a7, a5, PT_REGS_OFFSET
+ s32i a7, a3, EXC_TABLE_KSTK
+
+ /* restore context of the task 'next' */
+
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
+
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+ wsr a14, ps
+ rsync
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ abi_ret(XTENSA_SPILL_STACK_RESERVE)
+#elif defined(__XTENSA_CALL0_ABI__)
+ l32i a12, sp, 0
+ l32i a13, sp, 4
+ l32i a14, sp, 8
+ l32i a15, sp, 12
+ abi_ret(16)
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+ENDPROC(_switch_to)
+
+ENTRY(ret_from_fork)
+
+ /* void schedule_tail (struct task_struct *prev)
+ * Note: prev is still in abi_arg0 (return value from fake call frame)
+ */
+ abi_call schedule_tail
+
+ mov abi_arg0, a1
+ abi_call do_syscall_trace_leave
+ j common_exception_return
+
+ENDPROC(ret_from_fork)
+
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: abi_saved0 = thread_fn,
+ * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+ abi_call schedule_tail
+ mov abi_arg0, abi_saved1
+ abi_callx abi_saved0
+ j common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
+
+#ifdef CONFIG_HIBERNATION
+
+ .section .bss, "aw"
+ .align 4
+.Lsaved_regs:
+#if defined(__XTENSA_WINDOWED_ABI__)
+ .fill 2, 4
+#elif defined(__XTENSA_CALL0_ABI__)
+ .fill 6, 4
+#else
+#error Unsupported Xtensa ABI
+#endif
+ .align XCHAL_NCP_SA_ALIGN
+.Lsaved_user_regs:
+ .fill XTREGS_USER_SIZE, 1
+
+ .previous
+
+ENTRY(swsusp_arch_suspend)
+
+ abi_entry_default
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ s32i a0, a2, 0
+ s32i a1, a2, 4
+ save_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#elif defined(__XTENSA_CALL0_ABI__)
+ s32i a12, a2, 8
+ s32i a13, a2, 12
+ s32i a14, a2, 16
+ s32i a15, a2, 20
+#else
+#error Unsupported Xtensa ABI
+#endif
+ abi_call swsusp_save
+ mov a2, abi_rv
+ abi_ret_default
+
+ENDPROC(swsusp_arch_suspend)
+
+ENTRY(swsusp_arch_resume)
+
+ abi_entry_default
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ spill_registers_kernel
+#endif
+
+ movi a2, restore_pblist
+ l32i a2, a2, 0
+
+.Lcopy_pbe:
+ l32i a3, a2, PBE_ADDRESS
+ l32i a4, a2, PBE_ORIG_ADDRESS
+
+ __loopi a3, a9, PAGE_SIZE, 16
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+ l32i a7, a3, 8
+ l32i a8, a3, 12
+ addi a3, a3, 16
+ s32i a5, a4, 0
+ s32i a6, a4, 4
+ s32i a7, a4, 8
+ s32i a8, a4, 12
+ addi a4, a4, 16
+ __endl a3, a9
+
+ l32i a2, a2, PBE_NEXT
+ bnez a2, .Lcopy_pbe
+
+ movi a2, .Lsaved_regs
+ movi a3, .Lsaved_user_regs
+ l32i a0, a2, 0
+ l32i a1, a2, 4
+ load_xtregs_user a3 a4 a5 a6 a7 a8 0
+#if defined(__XTENSA_CALL0_ABI__)
+ l32i a12, a2, 8
+ l32i a13, a2, 12
+ l32i a14, a2, 16
+ l32i a15, a2, 20
+#endif
+ movi a2, 0
+ abi_ret_default
+
+ENDPROC(swsusp_arch_resume)
+
+#endif
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 000000000..8484294bc
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,386 @@
+/*
+ * arch/xtensa/kernel/head.S
+ *
+ * Xtensa Processor startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+/*
+ * This module contains the entry code for kernel images. It performs the
+ * minimal setup needed to call the generic C routines.
+ *
+ * Prerequisites:
+ *
+ * - The kernel image has been loaded to the address it was compiled for.
+ * - a2 contains either 0 or a pointer to a list of boot parameters.
+ * (see setup.c for more details)
+ *
+ */
+
+/*
+ * _start
+ *
+ * The bootloader passes a pointer to a list of boot parameters in a2.
+ */
+
+ /* The first bytes of the kernel image must be an instruction, so we
+ * manually allocate and define the literal constant we need for a jx
+ * instruction.
+ */
+
+ __HEAD
+ .begin no-absolute-literals
+
+ENTRY(_start)
+
+ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupOCD
+
+ .align 4
+ .literal_position
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+#if XCHAL_HAVE_WINDOWED
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+#endif
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ rsr a2, excsave1
+ movi a3, XCHAL_KSEG_PADDR
+ bltu a2, a3, 1f
+ sub a2, a2, a3
+ movi a3, XCHAL_KSEG_SIZE
+ bgeu a2, a3, 1f
+ movi a3, XCHAL_KSEG_CACHED_VADDR
+ add a2, a2, a3
+ wsr a2, excsave1
+1:
+#endif
+#endif
+
+ movi a0, _startup
+ jx a0
+
+ENDPROC(_start)
+ .end no-absolute-literals
+
+ __REF
+ .literal_position
+
+ENTRY(_startup)
+
+ /* Set a0 to 0 for the remaining initialization. */
+
+ movi a0, 0
+
+#if XCHAL_HAVE_VECBASE
+ movi a2, VECBASE_VADDR
+ wsr a2, vecbase
+#endif
+
+ /* Clear debugging registers. */
+
+#if XCHAL_HAVE_DEBUG
+#if XCHAL_NUM_IBREAK > 0
+ wsr a0, ibreakenable
+#endif
+ wsr a0, icount
+ movi a1, 15
+ wsr a0, icountlevel
+
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ wsr a0, SREG_DBREAKC + _index
+ .set _index, _index + 1
+ .endr
+#endif
+
+ /* Clear CCOUNT (not really necessary, but nice) */
+
+ wsr a0, ccount # not really necessary, but nice
+
+ /* Disable zero-loops. */
+
+#if XCHAL_HAVE_LOOPS
+ wsr a0, lcount
+#endif
+
+ /* Disable all timers. */
+
+ .set _index, 0
+ .rept XCHAL_NUM_TIMERS
+ wsr a0, SREG_CCOMPARE + _index
+ .set _index, _index + 1
+ .endr
+
+ /* Interrupt initialization. */
+
+ movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
+ wsr a0, intenable
+ wsr a2, intclear
+
+ /* Disable coprocessors. */
+
+#if XCHAL_HAVE_CP
+ wsr a0, cpenable
+#endif
+
+ /* Initialize the caches.
+ * a2, a3 are just working registers (clobbered).
+ */
+
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ ___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ ___unlock_icache_all a2 a3
+#endif
+
+ ___invalidate_dcache_all a2 a3
+ ___invalidate_icache_all a2 a3
+
+ isync
+
+ initialize_cacheattr
+
+#ifdef CONFIG_HAVE_SMP
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 1
+ wer a3, a2
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+
+ /* Disable interrupts. */
+ /* Enable window exceptions if kernel is built with windowed ABI. */
+ movi a2, KERNEL_PS_WOE_MASK | LOCKLEVEL
+ wsr a2, ps
+ rsync
+
+#ifdef CONFIG_SMP
+ /*
+	 * Note that with SMP we assume that the cores support the
+	 * PRID register.
+ */
+ rsr a2, prid
+ bnez a2, .Lboot_secondary
+
+#endif /* CONFIG_SMP */
+
+ /* Unpack data sections
+ *
+ * The linker script used to build the Linux kernel image
+ * creates a table located at __boot_reloc_table_start
+	 * that describes which data needs to be unpacked.
+ *
+ * Uses a2-a7.
+ */
+
+ movi a2, __boot_reloc_table_start
+ movi a3, __boot_reloc_table_end
+
+1: beq a2, a3, 3f # no more entries?
+ l32i a4, a2, 0 # start destination (in RAM)
+ l32i a5, a2, 4 # end destination (in RAM)
+ l32i a6, a2, 8 # start source (in ROM)
+ addi a2, a2, 12 # next entry
+ beq a4, a5, 1b # skip, empty entry
+ beq a4, a6, 1b # skip, source and dest. are the same
+
+2: l32i a7, a6, 0 # load word
+ addi a6, a6, 4
+ s32i a7, a4, 0 # store word
+ addi a4, a4, 4
+ bltu a4, a5, 2b
+ j 1b
+
+3:
+ /* All code and initialized data segments have been copied.
+ * Now clear the BSS segment.
+ */
+
+ movi a2, __bss_start # start of BSS
+ movi a3, __bss_stop # end of BSS
+
+ __loopt a2, a3, a4, 2
+ s32i a0, a2, 0
+ __endla a2, a3, 4
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+ /* After unpacking, flush the writeback cache to memory so the
+ * instructions/data are available.
+ */
+
+ ___flush_dcache_all a2 a3
+#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
+
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup bootstrap CPU stack in XIP kernel */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+#endif
+
+ movi abi_arg0, 0
+ xsr abi_arg0, excsave1
+
+ /* init_arch kick-starts the linux kernel */
+
+ abi_call init_arch
+ abi_call start_kernel
+
+should_never_return:
+ j should_never_return
+
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+ movi a2, cpu_start_ccount
+1:
+ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+1:
+ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+
+ movi abi_arg0, 0
+ wsr abi_arg0, excsave1
+
+ abi_call secondary_start_kernel
+ j should_never_return
+
+#endif /* CONFIG_SMP */
+
+ENDPROC(_startup)
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ ___flush_invalidate_dcache_all a2 a3
+#else
+ ___invalidate_dcache_all a2 a3
+#endif
+ memw
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 0
+ wer a3, a2
+ extw
+
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
+ memw
+ s32i a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+#endif
+1:
+ memw
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ j _startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+ __REFDATA
+ .align 4
+ENTRY(start_info)
+ .long init_thread_union + KERNEL_STACK_SIZE
+
+/*
+ * BSS section
+ */
+
+__PAGE_ALIGNED_BSS
+#ifdef CONFIG_MMU
+ENTRY(swapper_pg_dir)
+ .fill PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
+#endif
+ENTRY(empty_zero_page)
+ .fill PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/hibernate.c b/arch/xtensa/kernel/hibernate.c
new file mode 100644
index 000000000..06984327d
--- /dev/null
+++ b/arch/xtensa/kernel/hibernate.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <asm/coprocessor.h>
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
+
+ return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+#if XTENSA_HAVE_COPROCESSORS
+ local_coprocessors_flush_release_all();
+#endif
+}
+
+void notrace restore_processor_state(void)
+{
+}
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
new file mode 100644
index 000000000..285fb2942
--- /dev/null
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -0,0 +1,308 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/log2.h>
+#include <linux/percpu.h>
+#include <linux/perf_event.h>
+#include <asm/core.h>
+
+/* Breakpoint currently in use for each IBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
+
+/* Watchpoint currently in use for each DBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);
+
+int hw_breakpoint_slots(int type)
+{
+ switch (type) {
+ case TYPE_INST:
+ return XCHAL_NUM_IBREAK;
+ case TYPE_DATA:
+ return XCHAL_NUM_DBREAK;
+ default:
+ pr_warn("unknown slot type: %d\n", type);
+ return 0;
+ }
+}
+
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+ unsigned int len;
+ unsigned long va;
+
+ va = hw->address;
+ len = hw->len;
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ /* Type */
+ switch (attr->bp_type) {
+ case HW_BREAKPOINT_X:
+ hw->type = XTENSA_BREAKPOINT_EXECUTE;
+ break;
+ case HW_BREAKPOINT_R:
+ hw->type = XTENSA_BREAKPOINT_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ hw->type = XTENSA_BREAKPOINT_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Len */
+ hw->len = attr->bp_len;
+ if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
+ return -EINVAL;
+
+ /* Address */
+ hw->address = attr->bp_addr;
+ if (hw->address & (hw->len - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static void xtensa_wsr(unsigned long v, u8 sr)
+{
+	/* We don't have an indexed wsr, and creating the instruction
+	 * dynamically doesn't seem worth it given how small XCHAL_NUM_IBREAK
+	 * and XCHAL_NUM_DBREAK are. Thus the switch. If the build breaks
+	 * here, the switch below needs to be extended.
+ */
+ BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
+ BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);
+
+ switch (sr) {
+#if XCHAL_NUM_IBREAK > 0
+ case SREG_IBREAKA + 0:
+ xtensa_set_sr(v, SREG_IBREAKA + 0);
+ break;
+#endif
+#if XCHAL_NUM_IBREAK > 1
+ case SREG_IBREAKA + 1:
+ xtensa_set_sr(v, SREG_IBREAKA + 1);
+ break;
+#endif
+
+#if XCHAL_NUM_DBREAK > 0
+ case SREG_DBREAKA + 0:
+ xtensa_set_sr(v, SREG_DBREAKA + 0);
+ break;
+ case SREG_DBREAKC + 0:
+ xtensa_set_sr(v, SREG_DBREAKC + 0);
+ break;
+#endif
+#if XCHAL_NUM_DBREAK > 1
+ case SREG_DBREAKA + 1:
+ xtensa_set_sr(v, SREG_DBREAKA + 1);
+ break;
+
+ case SREG_DBREAKC + 1:
+ xtensa_set_sr(v, SREG_DBREAKC + 1);
+ break;
+#endif
+ }
+}
+
+static int alloc_slot(struct perf_event **slot, size_t n,
+ struct perf_event *bp)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ if (!slot[i]) {
+ slot[i] = bp;
+ return i;
+ }
+ }
+ return -EBUSY;
+}
+
+static void set_ibreak_regs(int reg, struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long ibreakenable;
+
+ xtensa_wsr(info->address, SREG_IBREAKA + reg);
+ ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
+ xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
+}
+
+static void set_dbreak_regs(int reg, struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
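+	/*
+	 * info->len is a power of two (checked in hw_breakpoint_arch_parse),
+	 * so -info->len has the low log2(len) bits clear, e.g. len == 4
+	 * yields ...fffc; masking with DBREAKC_MASK_MASK turns that into the
+	 * DBREAKC mask selecting the naturally aligned watched region.
+	 */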
+ unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;
+
+ if (info->type & XTENSA_BREAKPOINT_LOAD)
+ dbreakc |= DBREAKC_LOAD_MASK;
+ if (info->type & XTENSA_BREAKPOINT_STORE)
+ dbreakc |= DBREAKC_STOR_MASK;
+
+ xtensa_wsr(info->address, SREG_DBREAKA + reg);
+ xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
+}
+
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ int i;
+
+ if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+ if (i < 0)
+ return i;
+ set_ibreak_regs(i, bp);
+
+ } else {
+ /* Watchpoint */
+ i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+ if (i < 0)
+ return i;
+ set_dbreak_regs(i, bp);
+ }
+ return 0;
+}
+
+static int free_slot(struct perf_event **slot, size_t n,
+ struct perf_event *bp)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ if (slot[i] == bp) {
+ slot[i] = NULL;
+ return i;
+ }
+ }
+ return -EBUSY;
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
+ unsigned long ibreakenable;
+
+ /* Breakpoint */
+ i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+ if (i >= 0) {
+ ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
+ xtensa_set_sr(ibreakenable & ~(1 << i),
+ SREG_IBREAKENABLE);
+ }
+ } else {
+ /* Watchpoint */
+ i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+ if (i >= 0)
+ xtensa_wsr(0, SREG_DBREAKC + i);
+ }
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+ if (t->ptrace_bp[i]) {
+ unregister_hw_breakpoint(t->ptrace_bp[i]);
+ t->ptrace_bp[i] = NULL;
+ }
+ }
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+ if (t->ptrace_wp[i]) {
+ unregister_hw_breakpoint(t->ptrace_wp[i]);
+ t->ptrace_wp[i] = NULL;
+ }
+ }
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
+ memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
+}
+
+void restore_dbreak(void)
+{
+ int i;
+
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+ struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];
+
+ if (bp)
+ set_dbreak_regs(i, bp);
+ }
+ clear_thread_flag(TIF_DB_DISABLED);
+}
+
+int check_hw_breakpoint(struct pt_regs *regs)
+{
+ if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
+ int i;
+ struct perf_event **bp = this_cpu_ptr(bp_on_reg);
+
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+ if (bp[i] && !bp[i]->attr.disabled &&
+ regs->pc == bp[i]->attr.bp_addr)
+ perf_bp_event(bp[i], regs);
+ }
+ return 0;
+ } else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
+ struct perf_event **bp = this_cpu_ptr(wp_on_reg);
+ int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
+ DEBUGCAUSE_DBNUM_SHIFT;
+
+ if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
+ if (user_mode(regs)) {
+ perf_bp_event(bp[dbnum], regs);
+ } else {
+ set_thread_flag(TIF_DB_DISABLED);
+ xtensa_wsr(0, SREG_DBREAKC + dbnum);
+ }
+ } else {
+ WARN_ONCE(1,
+ "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
+ dbnum);
+ }
+ return 0;
+ }
+ return -ENOENT;
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 000000000..42f106004
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/xtensa/kernel/irq.c
+ *
+ * Xtensa built-in interrupt controller and some generic functions copied
+ * from i386.
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+#include <linux/uaccess.h>
+#include <asm/platform.h>
+
+DECLARE_PER_CPU(unsigned long, nmi_count);
+
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ {
+ unsigned long sp = current_stack_pointer;
+
+ sp &= THREAD_SIZE - 1;
+
+		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
+ printk("Stack overflow in do_IRQ: %ld\n",
+ sp - sizeof(struct thread_info));
+ }
+#endif
+ generic_handle_domain_irq(NULL, hwirq);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ unsigned cpu __maybe_unused;
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
+#if XTENSA_FAKE_NMI
+ seq_printf(p, "%*s:", prec, "NMI");
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+ seq_puts(p, " Non-maskable interrupts\n");
+#endif
+ return 0;
+}
+
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ int_irq = xtensa_map_ext_irq(ext_irq);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = int_irq;
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *irq_chip = d->host_data;
+ u32 mask = 1 << hw;
+
+ if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_simple_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "timer");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+#ifdef XCHAL_INTTYPE_MASK_PROFILING
+ } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "profiling");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+#endif
+ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+ /* XCHAL_INTTYPE_MASK_NMI */
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
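+/*
+ * Map the N-th external (edge- or level-triggered) interrupt to its hw irq
+ * number, i.e. the position of the N-th set bit in the combined external
+ * interrupt mask. For example, with external interrupts wired to hw irqs
+ * 2, 3 and 5, ext_irq 0/1/2 map to hw irq 2/3/5; xtensa_get_ext_irq_no()
+ * below is the inverse of this mapping.
+ */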
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
+{
+ unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+ unsigned i;
+
+ for (i = 0; mask; ++i, mask >>= 1) {
+ if ((mask & 1) && ext_irq-- == 0)
+ return i;
+ }
+ return XCHAL_NUM_INTERRUPTS;
+}
+
+unsigned xtensa_get_ext_irq_no(unsigned irq)
+{
+ unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+ ((1u << irq) - 1);
+ return hweight32(mask);
+}
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_USE_OF
+ irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+ xtensa_mx_init_legacy(NULL);
+#else
+ xtensa_pic_init_legacy(NULL);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+ ipi_init();
+#endif
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i, cpu = smp_processor_id();
+
+ for_each_active_irq(i) {
+ struct irq_data *data = irq_get_irq_data(i);
+ const struct cpumask *mask;
+ unsigned int newcpu;
+
+ if (irqd_is_per_cpu(data))
+ continue;
+
+ mask = irq_data_get_affinity_mask(data);
+ if (!cpumask_test_cpu(cpu, mask))
+ continue;
+
+ newcpu = cpumask_any_and(mask, cpu_online_mask);
+
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
+
+ irq_set_affinity(i, cpu_all_mask);
+ } else {
+ irq_set_affinity(i, mask);
+ }
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
new file mode 100644
index 000000000..ad1841cec
--- /dev/null
+++ b/arch/xtensa/kernel/jump_label.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Cadence Design Systems Inc.
+
+#include <linux/cpu.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/stop_machine.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+
+#define J_OFFSET_MASK 0x0003ffff
+#define J_SIGN_MASK (~(J_OFFSET_MASK >> 1))
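+
+/*
+ * The J opcode carries a signed 18-bit byte offset. J_OFFSET_MASK selects
+ * those 18 bits and J_SIGN_MASK (0xfffe0000) covers the bits that must all
+ * equal the sign bit: a displacement d is encodable iff (d & J_SIGN_MASK)
+ * is either 0 or J_SIGN_MASK, which is what arch_jump_label_transform()
+ * checks below.
+ */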
+
+#if defined(__XTENSA_EL__)
+#define J_INSN 0x6
+#define NOP_INSN 0x0020f0
+#elif defined(__XTENSA_EB__)
+#define J_INSN 0x60000000
+#define NOP_INSN 0x0f020000
+#else
+#error Unsupported endianness.
+#endif
+
+struct patch {
+ atomic_t cpu_count;
+ unsigned long addr;
+ size_t sz;
+ const void *data;
+};
+
+static void local_patch_text(unsigned long addr, const void *data, size_t sz)
+{
+ memcpy((void *)addr, data, sz);
+ local_flush_icache_range(addr, addr + sz);
+}
+
+static int patch_text_stop_machine(void *data)
+{
+ struct patch *patch = data;
+
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+ local_patch_text(patch->addr, patch->data, patch->sz);
+ atomic_inc(&patch->cpu_count);
+ } else {
+ while (atomic_read(&patch->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ __invalidate_icache_range(patch->addr, patch->sz);
+ }
+ return 0;
+}
+
+static void patch_text(unsigned long addr, const void *data, size_t sz)
+{
+ if (IS_ENABLED(CONFIG_SMP)) {
+ struct patch patch = {
+ .cpu_count = ATOMIC_INIT(0),
+ .addr = addr,
+ .sz = sz,
+ .data = data,
+ };
+ stop_machine_cpuslocked(patch_text_stop_machine,
+ &patch, cpu_online_mask);
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ local_patch_text(addr, data, sz);
+ local_irq_restore(flags);
+ }
+}
+
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ u32 d = (jump_entry_target(e) - (jump_entry_code(e) + 4));
+ u32 insn;
+
+ /* Jump only works within 128K of the J instruction. */
+ BUG_ON(!((d & J_SIGN_MASK) == 0 ||
+ (d & J_SIGN_MASK) == J_SIGN_MASK));
+
+ if (type == JUMP_LABEL_JMP) {
+#if defined(__XTENSA_EL__)
+ insn = ((d & J_OFFSET_MASK) << 6) | J_INSN;
+#elif defined(__XTENSA_EB__)
+ insn = ((d & J_OFFSET_MASK) << 8) | J_INSN;
+#endif
+ } else {
+ insn = NOP_INSN;
+ }
+
+ patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE);
+}
diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S
new file mode 100644
index 000000000..51daaf4e0
--- /dev/null
+++ b/arch/xtensa/kernel/mcount.S
@@ -0,0 +1,85 @@
+/*
+ * arch/xtensa/kernel/mcount.S
+ *
+ * Xtensa specific mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/ftrace.h>
+
+/*
+ * Entry condition:
+ *
+ * a2: a0 of the caller in windowed ABI
+ * a10: a0 of the caller in call0 ABI
+ *
+ * In call0 ABI the function _mcount is called with the special ABI:
+ * its argument is in a10 and all the usual argument registers (a2 - a7)
+ * must be preserved in addition to callee-saved a12 - a15.
+ */
+
+ENTRY(_mcount)
+#if defined(__XTENSA_WINDOWED_ABI__)
+ abi_entry_default
+
+ movi a4, ftrace_trace_function
+ l32i a4, a4, 0
+ movi a3, ftrace_stub
+ bne a3, a4, 1f
+ abi_ret_default
+
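+	/* In the windowed ABI the top two bits of a return address encode the
+	 * caller's window increment rather than address bits; splice in the
+	 * top bits of the stack pointer (a1) to turn a2 and a0 into real
+	 * addresses before handing them to the tracer.
+	 */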
+1: xor a7, a2, a1
+ movi a3, 0x3fffffff
+ and a7, a7, a3
+ xor a7, a7, a1
+
+ xor a6, a0, a1
+ and a6, a6, a3
+ xor a6, a6, a1
+ addi a6, a6, -MCOUNT_INSN_SIZE
+ callx4 a4
+
+ abi_ret_default
+#elif defined(__XTENSA_CALL0_ABI__)
+ abi_entry_default
+
+ movi a9, ftrace_trace_function
+ l32i a9, a9, 0
+ movi a11, ftrace_stub
+ bne a9, a11, 1f
+ abi_ret_default
+
+1: abi_entry(28)
+ s32i a0, sp, 0
+ s32i a2, sp, 4
+ s32i a3, sp, 8
+ s32i a4, sp, 12
+ s32i a5, sp, 16
+ s32i a6, sp, 20
+ s32i a7, sp, 24
+ addi a2, a10, -MCOUNT_INSN_SIZE
+ callx0 a9
+ l32i a0, sp, 0
+ l32i a2, sp, 4
+ l32i a3, sp, 8
+ l32i a4, sp, 12
+ l32i a5, sp, 16
+ l32i a6, sp, 20
+ l32i a7, sp, 24
+ abi_ret(28)
+#else
+#error Unsupported Xtensa ABI
+#endif
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+ abi_entry_default
+ abi_ret_default
+ENDPROC(ftrace_stub)
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 000000000..902845dda
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,189 @@
+/*
+ * arch/xtensa/kernel/module.c
+ *
+ * Module support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+static int
+decode_calln_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x5;
+#endif
+}
+
+static int
+decode_l32r_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x1;
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ unsigned char *location;
+ uint32_t value;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset;
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info);
+ value = sym->st_value + rela[i].r_addend;
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_XTENSA_NONE:
+ case R_XTENSA_DIFF8:
+ case R_XTENSA_DIFF16:
+ case R_XTENSA_DIFF32:
+ case R_XTENSA_ASM_EXPAND:
+ break;
+
+ case R_XTENSA_32:
+ case R_XTENSA_PLT:
+ *(uint32_t *)location += value;
+ break;
+
+ case R_XTENSA_SLOT0_OP:
+ if (decode_calln_opcode(location)) {
+ value -= ((unsigned long)location & -4) + 4;
+ if ((value & 3) != 0 ||
+ ((value + (1 << 19)) >> 20) != 0) {
+ pr_err("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+ location[0] = ((location[0] & ~0x3) |
+ ((value >> 16) & 0x3));
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[0] = ((location[0] & ~0xc0) |
+ ((value << 6) & 0xc0));
+ location[1] = (value >> 2) & 0xff;
+ location[2] = (value >> 10) & 0xff;
+#endif
+ } else if (decode_l32r_opcode(location)) {
+ value -= (((unsigned long)location + 3) & -4);
+ if ((value & 3) != 0 ||
+ (signed int)value >> 18 != -1) {
+ pr_err("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[1] = value & 0xff;
+ location[2] = (value >> 8) & 0xff;
+#endif
+ }
+ /* FIXME: Ignore any other opcodes. The Xtensa
+ assembler currently assumes that the linker will
+ always do relaxation and so all PC-relative
+ operands need relocations. (The assembler also
+ writes out the tentative PC-relative values,
+ assuming no link-time relaxation, so it is usually
+ safe to ignore the relocations.) If the
+ assembler's "--no-link-relax" flag can be made to
+ work, and if all kernel modules can be assembled
+ with that flag, then unexpected relocations could
+ be detected here. */
+ break;
+
+ case R_XTENSA_SLOT1_OP:
+ case R_XTENSA_SLOT2_OP:
+ case R_XTENSA_SLOT3_OP:
+ case R_XTENSA_SLOT4_OP:
+ case R_XTENSA_SLOT5_OP:
+ case R_XTENSA_SLOT6_OP:
+ case R_XTENSA_SLOT7_OP:
+ case R_XTENSA_SLOT8_OP:
+ case R_XTENSA_SLOT9_OP:
+ case R_XTENSA_SLOT10_OP:
+ case R_XTENSA_SLOT11_OP:
+ case R_XTENSA_SLOT12_OP:
+ case R_XTENSA_SLOT13_OP:
+ case R_XTENSA_SLOT14_OP:
+ pr_err("%s: unexpected FLIX relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ case R_XTENSA_SLOT0_ALT:
+ case R_XTENSA_SLOT1_ALT:
+ case R_XTENSA_SLOT2_ALT:
+ case R_XTENSA_SLOT3_ALT:
+ case R_XTENSA_SLOT4_ALT:
+ case R_XTENSA_SLOT5_ALT:
+ case R_XTENSA_SLOT6_ALT:
+ case R_XTENSA_SLOT7_ALT:
+ case R_XTENSA_SLOT8_ALT:
+ case R_XTENSA_SLOT9_ALT:
+ case R_XTENSA_SLOT10_ALT:
+ case R_XTENSA_SLOT11_ALT:
+ case R_XTENSA_SLOT12_ALT:
+ case R_XTENSA_SLOT13_ALT:
+ case R_XTENSA_SLOT14_ALT:
+ pr_err("%s: unexpected ALT relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ default:
+ pr_err("%s: unexpected relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
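The CALLn branch of R_XTENSA_SLOT0_OP above re-encodes an 18-bit word offset, so it first checks that the byte offset is word aligned and within roughly +/-512 KB of the call site. The stand-alone sketch below repeats that arithmetic on made-up addresses so the range rule is easy to verify; it is an illustration, not kernel code.

    /* Stand-alone illustration of the CALLn reach check; addresses are made up. */
    #include <stdint.h>
    #include <stdio.h>

    static int call_reloc_in_range(uint32_t location, uint32_t target)
    {
            /* offset from the word-aligned address of the instruction after CALLn */
            uint32_t value = target - ((location & ~3u) + 4);

            /* word aligned and within the signed 20-bit byte reach of CALLn */
            return (value & 3) == 0 && ((value + (1 << 19)) >> 20) == 0;
    }

    int main(void)
    {
            /* 0x10040000 - 0x10000004 = 0x3fffc: aligned and in range */
            printf("%d\n", call_reloc_in_range(0x10000001, 0x10040000));
            /* a 2 MB jump overflows the immediate and must be rejected */
            printf("%d\n", call_reloc_in_range(0x10000001, 0x10200000));
            return 0;
    }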
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 000000000..b702c0908
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,64 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+ .section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+ _j _SetupOCD
+
+ .begin no-absolute-literals
+ .literal_position
+
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+#if XCHAL_HAVE_WINDOWED
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+#endif
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+_SetupMMU:
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ /*
+ * Start Secondary Processors with NULL pointer to boot params.
+ */
+ movi a2, 0 # a2 == NULL
+ movi a3, _startup
+ jx a3
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 000000000..94955caa4
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA coherent memory allocation.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Based on version for i386.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/platform.h>
+
+static void do_cache_op(phys_addr_t paddr, size_t size,
+ void (*fn)(unsigned long, unsigned long))
+{
+ unsigned long off = paddr & (PAGE_SIZE - 1);
+ unsigned long pfn = PFN_DOWN(paddr);
+ struct page *page = pfn_to_page(pfn);
+
+ if (!PageHighMem(page))
+ fn((unsigned long)phys_to_virt(paddr), size);
+ else
+ while (size > 0) {
+ size_t sz = min_t(size_t, size, PAGE_SIZE - off);
+ void *vaddr = kmap_atomic(page);
+
+ fn((unsigned long)vaddr + off, sz);
+ kunmap_atomic(vaddr);
+ off = 0;
+ ++page;
+ size -= sz;
+ }
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ do_cache_op(paddr, size, __invalidate_dcache_range);
+ break;
+
+ case DMA_NONE:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_TO_DEVICE:
+ if (XCHAL_DCACHE_IS_WRITEBACK)
+ do_cache_op(paddr, size, __flush_dcache_range);
+ break;
+
+ case DMA_NONE:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
+}
+
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
+ */
+#ifdef CONFIG_MMU
+void *arch_dma_set_uncached(void *p, size_t size)
+{
+ return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+}
+#endif /* CONFIG_MMU */
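Drivers never call these hooks directly; they are the architecture back end of the generic streaming-DMA path on non-coherent configurations. Below is a hedged sketch of how a hypothetical driver exercises them; my_dev and buf are made-up names, and the mapping of API calls to arch hooks assumes the usual dma-direct behaviour.

    /* Hedged sketch, not part of this patch: streaming DMA from a device. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int start_rx(struct device *my_dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* dma-direct calls arch_sync_dma_for_device(); a no-op here for
             * DMA_FROM_DEVICE, since only writeback directions flush the dcache */
            handle = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(my_dev, handle))
                    return -ENOMEM;

            /* ... program the device with "handle" and let it fill the buffer ... */

            /* dma-direct calls arch_sync_dma_for_cpu(), i.e. the dcache
             * invalidate above, so the CPU sees the device's data */
            dma_unmap_single(my_dev, handle, len, DMA_FROM_DEVICE);
            return 0;
    }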
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 000000000..62c900e40
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/xtensa/kernel/pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ *
+ * Based largely on work from Cort (ppc/kernel/pci.c)
+ * IO functions copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/memblock.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/platform.h>
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ resource_size_t start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ if (size > 0x100) {
+ pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
+ pci_name(dev), dev->resource - res,
+ size);
+ }
+
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+
+ return start;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ if (bus->parent) {
+ /* This is a subordinate bridge */
+ pci_read_bridge_bases(bus);
+ }
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
+ * -- paulus.
+ */
+
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
+{
+ struct pci_controller *pci_ctrl = (struct pci_controller*) pdev->sysdata;
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
+
+ if (!pci_ctrl)
+ return -EINVAL; /* should never happen */
+
+ /* Convert to an offset within this PCI controller */
+ ioaddr -= (unsigned long)pci_ctrl->io_space.base;
+
+ vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
+ return 0;
+}
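A worked example of the I/O alignment rule in pcibios_align_resource() above (the numbers are illustrative): a request that would start at 0x2900 has (0x2900 & 0x300) != 0, so it is bumped to (0x2900 + 0x3ff) & ~0x3ff = 0x2c00. The new start is 0 modulo 0x400, i.e. back inside the 0x000-0x0ff window that even cards decoding only 10 address bits see unambiguously.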
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
new file mode 100644
index 000000000..183618090
--- /dev/null
+++ b/arch/xtensa/kernel/perf_event.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xtensa Performance Monitor Module driver
+ * See the Tensilica Debug User's Guide for documentation of the PMU registers.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#include <asm/core.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+
+#define XTENSA_HWVERSION_RG_2015_0 260000
+
+#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
+#define XTENSA_PMU_ERI_BASE 0x00101000
+#else
+#define XTENSA_PMU_ERI_BASE 0x00001000
+#endif
+
+/* Global control/status for all perf counters */
+#define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE
+/* Perf counter values */
+#define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
+/* Perf counter control registers */
+#define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
+/* Perf counter status registers */
+#define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
+
+#define XTENSA_PMU_PMG_PMEN 0x1
+
+#define XTENSA_PMU_COUNTER_MASK 0xffffffffULL
+#define XTENSA_PMU_COUNTER_MAX 0x7fffffff
+
+#define XTENSA_PMU_PMCTRL_INTEN 0x00000001
+#define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008
+#define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0
+#define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8
+#define XTENSA_PMU_PMCTRL_SELECT 0x00001f00
+#define XTENSA_PMU_PMCTRL_MASK_SHIFT 16
+#define XTENSA_PMU_PMCTRL_MASK 0xffff0000
+
+#define XTENSA_PMU_MASK(select, mask) \
+ (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
+ ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
+ XTENSA_PMU_PMCTRL_TRACELEVEL | \
+ XTENSA_PMU_PMCTRL_INTEN)
+
+#define XTENSA_PMU_PMSTAT_OVFL 0x00000001
+#define XTENSA_PMU_PMSTAT_INTASRT 0x00000010
+
+struct xtensa_pmu_events {
+ /* Array of events currently on this core */
+ struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
+ /* Bitmap of used hardware counters */
+ unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
+};
+static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
+
+static const u32 xtensa_hw_ctl[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1),
+ [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff),
+ [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1),
+ [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1),
+ /* Taken and non-taken branches + taken loop ends */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490),
+ /* Instruction-related + other global stall cycles */
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff),
+ /* Data-related global stall cycles */
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff),
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2),
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2),
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2),
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8),
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8),
+ },
+ },
+};
+
+static int xtensa_pmu_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ int ret;
+
+ cache_type = (config >> 0) & 0xff;
+ cache_op = (config >> 8) & 0xff;
+ cache_result = (config >> 16) & 0xff;
+
+ if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) ||
+ cache_op >= C(OP_MAX) ||
+ cache_result >= C(RESULT_MAX))
+ return -EINVAL;
+
+ ret = xtensa_cache_ctl[cache_type][cache_op][cache_result];
+
+ if (ret == 0)
+ return -EINVAL;
+
+ return ret;
+}
+
+static inline uint32_t xtensa_pmu_read_counter(int idx)
+{
+ return get_er(XTENSA_PMU_PM(idx));
+}
+
+static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
+{
+ set_er(v, XTENSA_PMU_PM(idx));
+}
+
+static void xtensa_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ uint64_t prev_raw_count, new_raw_count;
+ int64_t delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static bool xtensa_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ bool rc = false;
+ s64 left;
+
+ if (!is_sampling_event(event)) {
+ left = XTENSA_PMU_COUNTER_MAX;
+ } else {
+ s64 period = hwc->sample_period;
+
+ left = local64_read(&hwc->period_left);
+ if (left <= -period) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ rc = true;
+ } else if (left <= 0) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ rc = true;
+ }
+ if (left > XTENSA_PMU_COUNTER_MAX)
+ left = XTENSA_PMU_COUNTER_MAX;
+ }
+
+ local64_set(&hwc->prev_count, -left);
+ xtensa_pmu_write_counter(idx, -left);
+ perf_event_update_userpage(event);
+
+ return rc;
+}
+
+static void xtensa_pmu_enable(struct pmu *pmu)
+{
+ set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static void xtensa_pmu_disable(struct pmu *pmu)
+{
+ set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static int xtensa_pmu_event_init(struct perf_event *event)
+{
+ int ret;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
+ xtensa_hw_ctl[event->attr.config] == 0)
+ return -EINVAL;
+ event->hw.config = xtensa_hw_ctl[event->attr.config];
+ return 0;
+
+ case PERF_TYPE_HW_CACHE:
+ ret = xtensa_pmu_cache_event(event->attr.config);
+ if (ret < 0)
+ return ret;
+ event->hw.config = ret;
+ return 0;
+
+ case PERF_TYPE_RAW:
+ /* Not 'previous counter' select */
+ if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
+ (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
+ return -EINVAL;
+ event->hw.config = (event->attr.config &
+ (XTENSA_PMU_PMCTRL_KRNLCNT |
+ XTENSA_PMU_PMCTRL_TRACELEVEL |
+ XTENSA_PMU_PMCTRL_SELECT |
+ XTENSA_PMU_PMCTRL_MASK)) |
+ XTENSA_PMU_PMCTRL_INTEN;
+ return 0;
+
+ default:
+ return -ENOENT;
+ }
+}
+
+/*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+static void xtensa_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ xtensa_perf_event_set_period(event, hwc, idx);
+ }
+
+ hwc->state = 0;
+
+ set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
+}
+
+static void xtensa_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ set_er(0, XTENSA_PMU_PMCTRL(idx));
+ set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
+ XTENSA_PMU_PMSTAT(idx));
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) &&
+ !(event->hw.state & PERF_HES_UPTODATE)) {
+ xtensa_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+/*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+static int xtensa_pmu_add(struct perf_event *event, int flags)
+{
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (__test_and_set_bit(idx, ev->used_mask)) {
+ idx = find_first_zero_bit(ev->used_mask,
+ XCHAL_NUM_PERF_COUNTERS);
+ if (idx == XCHAL_NUM_PERF_COUNTERS)
+ return -EAGAIN;
+
+ __set_bit(idx, ev->used_mask);
+ hwc->idx = idx;
+ }
+ ev->event[idx] = event;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ xtensa_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void xtensa_pmu_del(struct perf_event *event, int flags)
+{
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+
+ xtensa_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, ev->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void xtensa_pmu_read(struct perf_event *event)
+{
+ xtensa_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+ struct perf_callchain_entry_ctx *entry = data;
+
+ perf_callchain_store(entry, frame->pc);
+ return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ xtensa_backtrace_kernel(regs, entry->max_stack,
+ callchain_trace, NULL, entry);
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ xtensa_backtrace_user(regs, entry->max_stack,
+ callchain_trace, entry);
+}
+
+void perf_event_print_debug(void)
+{
+ unsigned long flags;
+ unsigned i;
+
+ local_irq_save(flags);
+ pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
+ get_er(XTENSA_PMU_PMG));
+ for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
+ pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
+ i, get_er(XTENSA_PMU_PM(i)),
+ i, get_er(XTENSA_PMU_PMCTRL(i)),
+ i, get_er(XTENSA_PMU_PMSTAT(i)));
+ local_irq_restore(flags);
+}
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t rc = IRQ_NONE;
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+ unsigned i;
+
+ for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
+ uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
+ struct perf_event *event = ev->event[i];
+ struct hw_perf_event *hwc = &event->hw;
+ u64 last_period;
+
+ if (!(v & XTENSA_PMU_PMSTAT_OVFL))
+ continue;
+
+ set_er(v, XTENSA_PMU_PMSTAT(i));
+ xtensa_perf_event_update(event, hwc, i);
+ last_period = hwc->last_period;
+ if (xtensa_perf_event_set_period(event, hwc, i)) {
+ struct perf_sample_data data;
+ struct pt_regs *regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, last_period);
+ if (perf_event_overflow(event, &data, regs))
+ xtensa_pmu_stop(event, 0);
+ }
+
+ rc = IRQ_HANDLED;
+ }
+ return rc;
+}
+
+static struct pmu xtensa_pmu = {
+ .pmu_enable = xtensa_pmu_enable,
+ .pmu_disable = xtensa_pmu_disable,
+ .event_init = xtensa_pmu_event_init,
+ .add = xtensa_pmu_add,
+ .del = xtensa_pmu_del,
+ .start = xtensa_pmu_start,
+ .stop = xtensa_pmu_stop,
+ .read = xtensa_pmu_read,
+};
+
+static int xtensa_pmu_setup(unsigned int cpu)
+{
+ unsigned i;
+
+ set_er(0, XTENSA_PMU_PMG);
+ for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
+ set_er(0, XTENSA_PMU_PMCTRL(i));
+ set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
+ }
+ return 0;
+}
+
+static int __init xtensa_pmu_init(void)
+{
+ int ret;
+ int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+ "perf/xtensa:starting", xtensa_pmu_setup,
+ NULL);
+ if (ret) {
+ pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
+ return ret;
+ }
+#if XTENSA_FAKE_NMI
+ enable_irq(irq);
+#else
+ ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
+ "pmu", NULL);
+ if (ret < 0)
+ return ret;
+#endif
+
+ ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
+ if (ret)
+ free_irq(irq, NULL);
+
+ return ret;
+}
+early_initcall(xtensa_pmu_init);
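For PERF_TYPE_RAW the driver above takes the trace level, event select (bits 8..12) and sub-event mask (bits 16..31) straight from attr.config, mirroring XTENSA_PMU_MASK(). The user-space sketch below opens a raw counter equivalent to XTENSA_PMU_MASK(2, 0xffff), the instruction-count entry of xtensa_hw_ctl[]; the perf_event_open() invocation and the choice of event are illustrative only.

    /* Hedged user-space sketch: raw Xtensa PMU event via perf_event_open(). */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            long fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            /* tracelevel 0xf0, select 2 (instructions), mask 0xffff */
            attr.config = 0xf0 | (2 << 8) | (0xffffULL << 16);
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any CPU */,
                         -1 /* no group */, 0);
            if (fd < 0)
                    return 1;

            /* enable with ioctl(fd, PERF_EVENT_IOC_ENABLE, 0), run the workload,
             * then read back the 64-bit counter value */
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("instructions: %llu\n", (unsigned long long)count);
            return 0;
    }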
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 000000000..ac1e0e566
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,43 @@
+/*
+ * arch/xtensa/kernel/platform.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <asm/platform.h>
+#include <asm/timex.h>
+
+#define _F(r,f,a,b) \
+ r __platform_##f a b; \
+ r platform_##f a __attribute__((weak, alias("__platform_"#f)))
+
+/*
+ * Default functions that are used if no platform-specific function is defined.
+ * (Please refer to asm/platform.h for more information.)
+ */
+
+_F(void, init, (bp_tag_t *first), { });
+_F(void, setup, (char** cmd), { });
+_F(void, restart, (void), { while(1); });
+_F(void, halt, (void), { while(1); });
+_F(void, power_off, (void), { while(1); });
+_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
+_F(void, heartbeat, (void), { });
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+_F(void, calibrate_ccount, (void),
+{
+ pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
+ ccount_freq = 10 * 1000000UL;
+});
+#endif
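The _F() macro above emits a default __platform_<name>() body plus a weak alias platform_<name>(), so a board file only has to provide a strong definition to replace any of the defaults at link time. A minimal sketch of a hypothetical board override follows; the board name and message are made up.

    /* Hypothetical board file: the strong symbol wins over the weak default. */
    #include <linux/printk.h>
    #include <asm/platform.h>

    void platform_setup(char **cmdline)
    {
            pr_info("myboard: cmdline \"%s\"\n",
                    cmdline && *cmdline ? *cmdline : "");
    }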
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 000000000..68e0e2f06
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,398 @@
+/*
+ * arch/xtensa/kernel/process.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/mmu.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/asm-offsets.h>
+#include <asm/regs.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/traps.h>
+
+extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+#if XTENSA_HAVE_COPROCESSORS
+
+void local_coprocessors_flush_release_all(void)
+{
+ struct thread_info **coprocessor_owner;
+ struct thread_info *unique_owner[XCHAL_CP_MAX];
+ int n = 0;
+ int i, j;
+
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ xtensa_set_sr(XCHAL_CP_MASK, cpenable);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ struct thread_info *ti = coprocessor_owner[i];
+
+ if (ti) {
+ coprocessor_flush(ti, i);
+
+ for (j = 0; j < n; j++)
+ if (unique_owner[j] == ti)
+ break;
+ if (j == n)
+ unique_owner[n++] = ti;
+
+ coprocessor_owner[i] = NULL;
+ }
+ }
+ for (i = 0; i < n; i++) {
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ unique_owner[i]->cpenable = 0;
+ }
+ xtensa_set_sr(0, cpenable);
+}
+
+static void local_coprocessor_release_all(void *info)
+{
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ int i;
+
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+
+ /* Walk through all cp owners and release it for the requested one. */
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti)
+ coprocessor_owner[i] = NULL;
+ }
+ /* pairs with memw (1) in fast_coprocessor and memw in switch_to */
+ smp_wmb();
+ ti->cpenable = 0;
+ if (ti == current_thread_info())
+ xtensa_set_sr(0, cpenable);
+}
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_release_all,
+ ti, true);
+ }
+}
+
+static void local_coprocessor_flush_all(void *info)
+{
+ struct thread_info *ti = info;
+ struct thread_info **coprocessor_owner;
+ unsigned long old_cpenable;
+ int i;
+
+ coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner;
+ old_cpenable = xtensa_xsr(ti->cpenable, cpenable);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti)
+ coprocessor_flush(ti, i);
+ }
+ xtensa_set_sr(old_cpenable, cpenable);
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_all,
+ ti, true);
+ }
+}
+
+static void local_coprocessor_flush_release_all(void *info)
+{
+ local_coprocessor_flush_all(info);
+ local_coprocessor_release_all(info);
+}
+
+void coprocessor_flush_release_all(struct thread_info *ti)
+{
+ if (ti->cpenable) {
+ /* pairs with memw (2) in fast_coprocessor */
+ smp_rmb();
+ smp_call_function_single(ti->cp_owner_cpu,
+ local_coprocessor_flush_release_all,
+ ti, true);
+ }
+}
+
+#endif
+
+
+/*
+ * Powermanagement idle function, if any is provided by the platform.
+ */
+void arch_cpu_idle(void)
+{
+ platform_idle();
+}
+
+/*
+ * This is called when the thread calls exit().
+ */
+void exit_thread(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(task_thread_info(tsk));
+#endif
+}
+
+/*
+ * Flush thread state. This is called when a thread does an execve()
+ * Note that we flush coprocessor registers for the case execve fails.
+ */
+void flush_thread(void)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ struct thread_info *ti = current_thread_info();
+ coprocessor_flush_release_all(ti);
+#endif
+ flush_ptrace_hw_breakpoint(current);
+}
+
+/*
+ * this gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(task_thread_info(src));
+#endif
+ *dst = *src;
+ return 0;
+}
+
+/*
+ * Copy thread.
+ *
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
+ * The stack layout for the new thread looks like this:
+ *
+ * +------------------------+
+ * | childregs |
+ * +------------------------+ <- thread.sp = sp in dummy-frame
+ * | dummy-frame | (saved in dummy-frame spill-area)
+ * +------------------------+
+ *
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
+ * sp points to itself (thread.sp)
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
+ *
+ * Note: This is a pristine frame, so we don't need any spill region on top of
+ * childregs.
+ *
+ * The fun part: if we're keeping the same VM (i.e. cloning a thread,
+ * not an entire process), we're normally given a new usp, and we CANNOT share
+ * any live address register windows. If we just copy those live frames over,
+ * the two threads (parent and child) will overflow the same frames onto the
+ * parent stack at different times, likely corrupting the parent stack (esp.
+ * if the parent returns from functions that called clone() and calls new
+ * ones, before the child overflows its now old copies of its parent windows).
+ * One solution is to spill windows to the parent stack, but that's fairly
+ * involved. Much simpler to just not copy those live frames across.
+ */
+
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp_thread_fn = args->stack;
+ unsigned long tls = args->tls;
+ struct pt_regs *childregs = task_pt_regs(p);
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ struct thread_info *ti;
+#endif
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+ /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
+ SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
+ SPILL_SLOT(childregs, 0) = 0;
+
+ p->thread.sp = (unsigned long)childregs;
+#elif defined(__XTENSA_CALL0_ABI__)
+ /* Reserve 16 bytes for the _switch_to stack frame. */
+ p->thread.sp = (unsigned long)childregs - 16;
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+ if (!args->fn) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
+
+ *childregs = *regs;
+ childregs->areg[1] = usp;
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ usually starts on a pristine stack, so we have to reset
+ windowbase, windowstart and wmask.
+ (Note that such a new thread is required to always create
+ an initial call4 frame)
+ The exception is vfork, where the new thread continues to
+ run on the parent's stack until it calls execve. This could
+ be a call8 or call12, which requires a legal stack frame
+ of the previous caller for the overflow handlers to work.
+ (Note that it's always legal to overflow live registers).
+		   In this case, we make sure to spill at least the stack pointer
+		   of that frame. */
+
+ if (clone_flags & CLONE_VM) {
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
+ }
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->threadptr = tls;
+ } else {
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread: */
+#if defined(__XTENSA_WINDOWED_ABI__)
+ /*
+ * a2 = thread_fn, a3 = thread_fn arg.
+ * Window underflow will load registers from the
+ * spill slots on the stack on return from _switch_to.
+ */
+ SPILL_SLOT(childregs, 2) = (unsigned long)args->fn;
+ SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg;
+#elif defined(__XTENSA_CALL0_ABI__)
+ /*
+ * a12 = thread_fn, a13 = thread_fn arg.
+ * _switch_to epilogue will load registers from the stack.
+ */
+ ((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn;
+ ((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg;
+#else
+#error Unsupported Xtensa ABI
+#endif
+
+ /* Childregs are only used when we're going to userspace
+ * in which case start_thread will set them up.
+ */
+ }
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ ti = task_thread_info(p);
+ ti->cpenable = 0;
+#endif
+
+ clear_ptrace_hw_breakpoint(p);
+
+ return 0;
+}
+
+
+/*
+ * These bracket the sleeping functions..
+ */
+
+unsigned long __get_wchan(struct task_struct *p)
+{
+ unsigned long sp, pc;
+ unsigned long stack_page = (unsigned long) task_stack_page(p);
+ int count = 0;
+
+ sp = p->thread.sp;
+ pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+
+ do {
+ if (sp < stack_page + sizeof(struct task_struct) ||
+ sp >= (stack_page + THREAD_SIZE) ||
+ pc == 0)
+ return 0;
+ if (!in_sched_functions(pc))
+ return pc;
+
+ /* Stack layout: sp-4: ra, sp-3: sp' */
+
+ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+ sp = SPILL_SLOT(sp, 1);
+ } while (count++ < 16);
+ return 0;
+}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 000000000..f29477162
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,585 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Scott Foehner<sfoehner@yahoo.com>,
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/audit.h>
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/seccomp.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+#include <asm/coprocessor.h>
+#include <asm/elf.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ struct user_pt_regs newregs = {
+ .pc = regs->pc,
+ .ps = regs->ps & ~(1 << PS_EXCM_BIT),
+ .lbeg = regs->lbeg,
+ .lend = regs->lend,
+ .lcount = regs->lcount,
+ .sar = regs->sar,
+ .threadptr = regs->threadptr,
+ .windowbase = regs->windowbase,
+ .windowstart = regs->windowstart,
+ .syscall = regs->syscall,
+ };
+
+ memcpy(newregs.a,
+ regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
+ regs->windowbase * 16);
+ memcpy(newregs.a + regs->windowbase * 4,
+ regs->areg,
+ (WSBITS - regs->windowbase) * 16);
+
+ return membuf_write(&to, &newregs, sizeof(newregs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_pt_regs newregs = {0};
+ struct pt_regs *regs;
+ const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ if (ret)
+ return ret;
+
+ if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
+ return -EINVAL;
+
+ regs = task_pt_regs(target);
+ regs->pc = newregs.pc;
+ regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
+ regs->lbeg = newregs.lbeg;
+ regs->lend = newregs.lend;
+ regs->lcount = newregs.lcount;
+ regs->sar = newregs.sar;
+ regs->threadptr = newregs.threadptr;
+
+ if (newregs.syscall)
+ regs->syscall = newregs.syscall;
+
+ if (newregs.windowbase != regs->windowbase ||
+ newregs.windowstart != regs->windowstart) {
+ u32 rotws, wmask;
+
+ rotws = (((newregs.windowstart |
+ (newregs.windowstart << WSBITS)) >>
+ newregs.windowbase) &
+ ((1 << WSBITS) - 1)) & ~1;
+ wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+ (rotws & 0xF) | 1;
+ regs->windowbase = newregs.windowbase;
+ regs->windowstart = newregs.windowstart;
+ regs->wmask = wmask;
+ }
+
+ memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
+ newregs.a, newregs.windowbase * 16);
+ memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
+ (WSBITS - newregs.windowbase) * 16);
+
+ return 0;
+}
+
+static int tie_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int ret;
+ struct pt_regs *regs = task_pt_regs(target);
+ struct thread_info *ti = task_thread_info(target);
+ elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
+
+ if (!newregs)
+ return -ENOMEM;
+
+ newregs->opt = regs->xtregs_opt;
+ newregs->user = ti->xtregs_user;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessor registers to memory. */
+ coprocessor_flush_all(ti);
+ newregs->cp0 = ti->xtregs_cp.cp0;
+ newregs->cp1 = ti->xtregs_cp.cp1;
+ newregs->cp2 = ti->xtregs_cp.cp2;
+ newregs->cp3 = ti->xtregs_cp.cp3;
+ newregs->cp4 = ti->xtregs_cp.cp4;
+ newregs->cp5 = ti->xtregs_cp.cp5;
+ newregs->cp6 = ti->xtregs_cp.cp6;
+ newregs->cp7 = ti->xtregs_cp.cp7;
+#endif
+ ret = membuf_write(&to, newregs, sizeof(*newregs));
+ kfree(newregs);
+ return ret;
+}
+
+static int tie_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs *regs = task_pt_regs(target);
+ struct thread_info *ti = task_thread_info(target);
+ elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
+
+ if (!newregs)
+ return -ENOMEM;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ newregs, 0, -1);
+
+ if (ret)
+ goto exit;
+ regs->xtregs_opt = newregs->opt;
+ ti->xtregs_user = newregs->user;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessors before we overwrite them. */
+ coprocessor_flush_release_all(ti);
+ ti->xtregs_cp.cp0 = newregs->cp0;
+ ti->xtregs_cp.cp1 = newregs->cp1;
+ ti->xtregs_cp.cp2 = newregs->cp2;
+ ti->xtregs_cp.cp3 = newregs->cp3;
+ ti->xtregs_cp.cp4 = newregs->cp4;
+ ti->xtregs_cp.cp5 = newregs->cp5;
+ ti->xtregs_cp.cp6 = newregs->cp6;
+ ti->xtregs_cp.cp7 = newregs->cp7;
+#endif
+exit:
+ kfree(newregs);
+ return ret;
+}
+
+enum xtensa_regset {
+ REGSET_GPR,
+ REGSET_TIE,
+};
+
+static const struct user_regset xtensa_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct user_pt_regs) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_TIE] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(elf_xtregs_t) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = tie_get,
+ .set = tie_set,
+ },
+};
+
+static const struct user_regset_view user_xtensa_view = {
+ .name = "xtensa",
+ .e_machine = EM_XTENSA,
+ .regsets = xtensa_regsets,
+ .n = ARRAY_SIZE(xtensa_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_xtensa_view;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
+ */
+
+void ptrace_disable(struct task_struct *child)
+{
+ /* Nothing to do.. */
+}
+
+static int ptrace_getregs(struct task_struct *child, void __user *uregs)
+{
+ return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
+ 0, sizeof(xtensa_gregset_t), uregs);
+}
+
+static int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+ return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
+ 0, sizeof(xtensa_gregset_t), uregs);
+}
+
+static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+ return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
+ 0, sizeof(elf_xtregs_t), uregs);
+}
+
+static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+ return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
+ 0, sizeof(elf_xtregs_t), uregs);
+}
+
+static int ptrace_peekusr(struct task_struct *child, long regno,
+ long __user *ret)
+{
+ struct pt_regs *regs;
+ unsigned long tmp;
+
+ regs = task_pt_regs(child);
+ tmp = 0; /* Default return value. */
+
+ switch(regno) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ tmp = regs->areg[regno - REG_AR_BASE];
+ break;
+
+ case REG_A_BASE ... REG_A_BASE + 15:
+ tmp = regs->areg[regno - REG_A_BASE];
+ break;
+
+ case REG_PC:
+ tmp = regs->pc;
+ break;
+
+ case REG_PS:
+ /* Note: PS.EXCM is not set while user task is running;
+ * its being set in regs is for exception handling
+ * convenience.
+ */
+ tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
+ break;
+
+ case REG_WB:
+ break; /* tmp = 0 */
+
+ case REG_WS:
+ {
+ unsigned long wb = regs->windowbase;
+ unsigned long ws = regs->windowstart;
+ tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
+ ((1 << WSBITS) - 1);
+ break;
+ }
+ case REG_LBEG:
+ tmp = regs->lbeg;
+ break;
+
+ case REG_LEND:
+ tmp = regs->lend;
+ break;
+
+ case REG_LCOUNT:
+ tmp = regs->lcount;
+ break;
+
+ case REG_SAR:
+ tmp = regs->sar;
+ break;
+
+ case SYSCALL_NR:
+ tmp = regs->syscall;
+ break;
+
+ default:
+ return -EIO;
+ }
+ return put_user(tmp, ret);
+}
+
+static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+ struct pt_regs *regs;
+ regs = task_pt_regs(child);
+
+ switch (regno) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ regs->areg[regno - REG_AR_BASE] = val;
+ break;
+
+ case REG_A_BASE ... REG_A_BASE + 15:
+ regs->areg[regno - REG_A_BASE] = val;
+ break;
+
+ case REG_PC:
+ regs->pc = val;
+ break;
+
+ case SYSCALL_NR:
+ regs->syscall = val;
+ break;
+
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void ptrace_hbptriggered(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int i;
+ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+
+ if (bp->attr.bp_type & HW_BREAKPOINT_X) {
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
+ if (current->thread.ptrace_bp[i] == bp)
+ break;
+ i <<= 1;
+ } else {
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
+ if (current->thread.ptrace_wp[i] == bp)
+ break;
+ i = (i << 1) | 1;
+ }
+
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
+}
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+ struct perf_event_attr attr;
+
+ ptrace_breakpoint_init(&attr);
+
+ /* Initialise fields to sane defaults. */
+ attr.bp_addr = 0;
+ attr.bp_len = 1;
+ attr.bp_type = type;
+ attr.disabled = 1;
+
+ return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+ tsk);
+}
+
+/*
+ * Address bit 0 chooses the instruction (0) or data (1) break register; bits
+ * 31..1 are the register number.
+ * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
+ * address (0) and control (1).
+ * The instruction breakpoint control word is 0 to clear the breakpoint and
+ * 1 to set it.
+ * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
+ * 'trigger on load', bits 29..0 are the length. Length 0 is used to clear a
+ * breakpoint. To set a breakpoint the length must be a power of 2 in the
+ * range 1..64 and the address must be length-aligned.
+ * (A user-space usage sketch follows this CONFIG_HAVE_HW_BREAKPOINT block.)
+ */
+
+static long ptrace_gethbpregs(struct task_struct *child, long addr,
+ long __user *datap)
+{
+ struct perf_event *bp;
+ u32 user_data[2] = {0};
+ bool dbreak = addr & 1;
+ unsigned idx = addr >> 1;
+
+ if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+ (dbreak && idx >= XCHAL_NUM_DBREAK))
+ return -EINVAL;
+
+ if (dbreak)
+ bp = child->thread.ptrace_wp[idx];
+ else
+ bp = child->thread.ptrace_bp[idx];
+
+ if (bp) {
+ user_data[0] = bp->attr.bp_addr;
+ user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
+ if (dbreak) {
+ if (bp->attr.bp_type & HW_BREAKPOINT_R)
+ user_data[1] |= DBREAKC_LOAD_MASK;
+ if (bp->attr.bp_type & HW_BREAKPOINT_W)
+ user_data[1] |= DBREAKC_STOR_MASK;
+ }
+ }
+
+ if (copy_to_user(datap, user_data, sizeof(user_data)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ptrace_sethbpregs(struct task_struct *child, long addr,
+ long __user *datap)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ u32 user_data[2];
+ bool dbreak = addr & 1;
+ unsigned idx = addr >> 1;
+ int bp_type = 0;
+
+ if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+ (dbreak && idx >= XCHAL_NUM_DBREAK))
+ return -EINVAL;
+
+ if (copy_from_user(user_data, datap, sizeof(user_data)))
+ return -EFAULT;
+
+ if (dbreak) {
+ bp = child->thread.ptrace_wp[idx];
+ if (user_data[1] & DBREAKC_LOAD_MASK)
+ bp_type |= HW_BREAKPOINT_R;
+ if (user_data[1] & DBREAKC_STOR_MASK)
+ bp_type |= HW_BREAKPOINT_W;
+ } else {
+ bp = child->thread.ptrace_bp[idx];
+ bp_type = HW_BREAKPOINT_X;
+ }
+
+ if (!bp) {
+ bp = ptrace_hbp_create(child,
+ bp_type ? bp_type : HW_BREAKPOINT_RW);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+ if (dbreak)
+ child->thread.ptrace_wp[idx] = bp;
+ else
+ child->thread.ptrace_bp[idx] = bp;
+ }
+
+ attr = bp->attr;
+ attr.bp_addr = user_data[0];
+ attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
+ attr.bp_type = bp_type;
+ attr.disabled = !attr.bp_len;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+#endif
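A hedged user-space sketch of the register encoding described in the comment above: arming data break register 0 as a 4-byte store watchpoint. The PTRACE_SETHBPREGS request and the control-word bit layout follow the xtensa uapi <asm/ptrace.h> and the comment above; the child pid and watched variable are hypothetical.

    /* Hedged sketch of PTRACE_SETHBPREGS; "child" and "watched" are made up. */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>         /* PTRACE_SETHBPREGS on xtensa */
    #include <stdint.h>

    #define DBREAKC_STORE_TRIGGER   (1u << 31)      /* bit 31: trigger on store */

    static long set_store_watchpoint(pid_t child, void *watched)
    {
            uint32_t regs[2];
            long addr = (0 << 1) | 1;               /* data break register 0 */

            regs[0] = (uint32_t)(uintptr_t)watched; /* address, length-aligned */
            regs[1] = DBREAKC_STORE_TRIGGER | 4;    /* store trigger, length 4 */

            return ptrace(PTRACE_SETHBPREGS, child, (void *)addr, regs);
    }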
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EPERM;
+ void __user *datap = (void __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR: /* read register specified by addr. */
+ ret = ptrace_peekusr(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR: /* write register specified by addr. */
+ ret = ptrace_pokeusr(child, addr, data);
+ break;
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, datap);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, datap);
+ break;
+
+ case PTRACE_GETXTREGS:
+ ret = ptrace_getxregs(child, datap);
+ break;
+
+ case PTRACE_SETXTREGS:
+ ret = ptrace_setxregs(child, datap);
+ break;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ case PTRACE_GETHBPREGS:
+ ret = ptrace_gethbpregs(child, addr, datap);
+ break;
+
+ case PTRACE_SETHBPREGS:
+ ret = ptrace_sethbpregs(child, addr, datap);
+ break;
+#endif
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (regs->syscall == NO_SYSCALL)
+ regs->areg[2] = -ENOSYS;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ ptrace_report_syscall_entry(regs)) {
+ regs->areg[2] = -ENOSYS;
+ regs->syscall = NO_SYSCALL;
+ return 0;
+ }
+
+ if (regs->syscall == NO_SYSCALL ||
+ secure_computing() == -1) {
+ do_syscall_trace_leave(regs);
+ return 0;
+ }
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+ audit_syscall_entry(regs->syscall, regs->areg[6],
+ regs->areg[3], regs->areg[4],
+ regs->areg[5]);
+ return 1;
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ ptrace_report_syscall_exit(regs, step);
+}
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644
index 000000000..8362388c8
--- /dev/null
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -0,0 +1,127 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap that records the PC of the S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = regs->exccause;
+ } else {
+ do_unhandled(regs);
+ }
+}
+
+/* Simple test of S32C1I (SoC bring-up assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ * we can get rid of this panic for single core (not SMP)
+ */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display a reminder for early engineering test or demo chips / FPGA bitstreams.
+ */
+static int __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
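For readers unfamiliar with the instruction being probed: S32C1I compares the word at the effective address with the SCOMPARE1 special register, stores the register operand only on a match, and always returns the old memory contents in that operand. A minimal (non-atomic) C model of that contract, matching the probed_compare_swap() comment above:

    /* Illustrative C model of the S32C1I contract tested above. */
    static int s32c1i_model(int *mem, int scompare1, int newval)
    {
            int old = *mem;

            if (old == scompare1)
                    *mem = newval;  /* store only when memory still matches */
            return old;             /* old value lands in the register operand */
    }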
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 000000000..9191738f9
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,704 @@
+/*
+ * arch/xtensa/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/screen_info.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+# include <linux/console.h>
+#endif
+
+#ifdef CONFIG_PROC_FS
+# include <linux/seq_file.h>
+#endif
+
+#include <asm/bootparam.h>
+#include <asm/kasan.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/param.h>
+#include <asm/platform.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/sysmem.h>
+#include <asm/timex.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 24,
+ .orig_video_cols = 80,
+ .orig_video_lines = 24,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16,
+};
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
+extern int initrd_below_start_ok;
+#endif
+
+#ifdef CONFIG_USE_OF
+void *dtb_start = __dtb_start;
+#endif
+
+extern unsigned long loops_per_jiffy;
+
+/* Command line specified as configuration option. */
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+#ifdef CONFIG_PARSE_BOOTPARAM
+/*
+ * Boot parameter parsing.
+ *
+ * The Xtensa port uses a list of variable-sized tags to pass data to
+ * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
+ * to be recognised. The list is terminated with a zero-sized
+ * BP_TAG_LAST tag.
+ */
+
+typedef struct tagtable {
+ u32 tag;
+ int (*parse)(const bp_tag_t*);
+} tagtable_t;
+
+#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
+ __section(".taglist") __attribute__((used)) = { tag, fn }
+
+/* parse current tag */
+
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+ return -1;
+
+ return memblock_add(mi->start, mi->end - mi->start);
+}
+
+__tagtable(BP_TAG_MEMORY, parse_tag_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init parse_tag_initrd(const bp_tag_t* tag)
+{
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
+
+ return 0;
+}
+
+__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+#ifdef CONFIG_USE_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+ dtb_start = __va(tag->data[0]);
+ return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+#endif /* CONFIG_USE_OF */
+
+static int __init parse_tag_cmdline(const bp_tag_t* tag)
+{
+ strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+ return 0;
+}
+
+__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
+
+static int __init parse_bootparam(const bp_tag_t* tag)
+{
+ extern tagtable_t __tagtable_begin, __tagtable_end;
+ tagtable_t *t;
+
+ /* Boot parameters must start with a BP_TAG_FIRST tag. */
+
+ if (tag->id != BP_TAG_FIRST) {
+ pr_warn("Invalid boot parameters!\n");
+ return 0;
+ }
+
+ tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
+
+ /* Parse all tags. */
+
+ while (tag != NULL && tag->id != BP_TAG_LAST) {
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+ if (tag->id == t->tag) {
+ t->parse(tag);
+ break;
+ }
+ }
+ if (t == &__tagtable_end)
+ pr_warn("Ignoring tag 0x%08x\n", tag->id);
+ tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
+ }
+
+ return 0;
+}
+#else
+static int __init parse_bootparam(const bp_tag_t *tag)
+{
+ pr_info("Ignoring boot parameters at %p\n", tag);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_USE_OF
+
+#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const __be32 *ranges;
+ int len;
+
+ if (depth > 1)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
+
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
+
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ init_kio();
+
+ return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
+#endif
+
+void __init early_init_devtree(void *params)
+{
+ early_init_dt_scan(params);
+ of_scan_flat_dt(xtensa_dt_io_area, NULL);
+
+ if (!command_line[0])
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+#endif /* CONFIG_USE_OF */
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
+ /* Initialize MMU. */
+
+ init_mmu();
+
+ /* Initialize initial KASAN shadow map */
+
+ kasan_early_init();
+
+ /* Parse boot parameters */
+
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+#ifdef CONFIG_USE_OF
+ early_init_devtree(dtb_start);
+#endif
+
+#ifdef CONFIG_CMDLINE_BOOL
+ if (!command_line[0])
+ strscpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
+ /* Early hook for platforms */
+
+ platform_init(bp_start);
+}
+
+/*
+ * Initialize system. Setup memory and reserve regions.
+ */
+
+static inline int __init_memblock mem_reserve(unsigned long start,
+ unsigned long end)
+{
+ return memblock_reserve(start, end - start);
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ pr_info("config ID: %08x:%08x\n",
+ xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE));
+ if (xtensa_get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
+ xtensa_get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
+ pr_info("built for config ID: %08x:%08x\n",
+ XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
+
+ *cmdline_p = command_line;
+ platform_setup(cmdline_p);
+ strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+
+ /* Reserve some memory regions */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start < initrd_end &&
+ !mem_reserve(__pa(initrd_start), __pa(initrd_end)))
+ initrd_below_start_ok = 1;
+ else
+ initrd_start = 0;
+#endif
+
+ mem_reserve(__pa(_stext), __pa(_end));
+#ifdef CONFIG_XIP_KERNEL
+ mem_reserve(__pa(_xip_start), __pa(_xip_end));
+#endif
+
+#ifdef CONFIG_VECTORS_ADDR
+#ifdef SUPPORT_WINDOWED
+ mem_reserve(__pa(_WindowVectors_text_start),
+ __pa(_WindowVectors_text_end));
+#endif
+
+ mem_reserve(__pa(_DebugInterruptVector_text_start),
+ __pa(_DebugInterruptVector_text_end));
+
+ mem_reserve(__pa(_KernelExceptionVector_text_start),
+ __pa(_KernelExceptionVector_text_end));
+
+ mem_reserve(__pa(_UserExceptionVector_text_start),
+ __pa(_UserExceptionVector_text_end));
+
+ mem_reserve(__pa(_DoubleExceptionVector_text_start),
+ __pa(_DoubleExceptionVector_text_end));
+
+ mem_reserve(__pa(_exception_text_start),
+ __pa(_exception_text_end));
+#if XCHAL_EXCM_LEVEL >= 2
+ mem_reserve(__pa(_Level2InterruptVector_text_start),
+ __pa(_Level2InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ mem_reserve(__pa(_Level3InterruptVector_text_start),
+ __pa(_Level3InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ mem_reserve(__pa(_Level4InterruptVector_text_start),
+ __pa(_Level4InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ mem_reserve(__pa(_Level5InterruptVector_text_start),
+ __pa(_Level5InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ mem_reserve(__pa(_Level6InterruptVector_text_start),
+ __pa(_Level6InterruptVector_text_end));
+#endif
+
+#endif /* CONFIG_VECTORS_ADDR */
+
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
+ mem_reserve(__pa(_SecondaryResetVector_text_start),
+ __pa(_SecondaryResetVector_text_end));
+#endif
+ parse_early_param();
+ bootmem_init();
+ kasan_init();
+ unflatten_and_copy_device_tree();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ paging_init();
+ zones_init();
+
+#ifdef CONFIG_VT
+# if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+# endif
+#endif
+}
+
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data, i);
+ cpu->hotpluggable = !!i;
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
+void cpu_reset(void)
+{
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+ * be flushed.
+ * Way 4 is not currently used by linux.
+ * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
+ * Way 5 shall be flushed and way 6 shall be set to identity mapping
+ * on MMUv3.
+ */
+ local_flush_tlb_all();
+ invalidate_page_directory();
+#if XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 */
+ {
+ unsigned long vaddr = (unsigned long)cpu_reset;
+ unsigned long paddr = __pa(vaddr);
+ unsigned long tmpaddr = vaddr + SZ_512M;
+ unsigned long tmp0, tmp1, tmp2, tmp3;
+
+ /*
+ * Find a place for the temporary mapping. It must not be
+ * in the same 512MB region with vaddr or paddr, otherwise
+ * there may be multihit exception either on entry to the
+ * temporary mapping, or on entry to the identity mapping.
+ * (512MB is the biggest page size supported by TLB.)
+ */
+ while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
+ tmpaddr += SZ_512M;
+
+ /* Invalidate mapping in the selected temporary area */
+ if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT))
+ invalidate_itlb_entry(itlb_probe(tmpaddr));
+ if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
+ invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
+
+ /*
+ * Map two consecutive pages starting at the physical address
+ * of this function to the temporary mapping area.
+ */
+ write_itlb_entry(__pte((paddr & PAGE_MASK) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ tmpaddr & PAGE_MASK);
+ write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ (tmpaddr & PAGE_MASK) + PAGE_SIZE);
+
+ /* Reinitialize TLB */
+ __asm__ __volatile__ ("movi %0, 1f\n\t"
+ "movi %3, 2f\n\t"
+ "add %0, %0, %4\n\t"
+ "add %3, %3, %5\n\t"
+ "jx %0\n"
+ /*
+ * No literal, data or stack access
+ * below this point
+ */
+ "1:\n\t"
+ /* Initialize *tlbcfg */
+ "movi %0, 0\n\t"
+ "wsr %0, itlbcfg\n\t"
+ "wsr %0, dtlbcfg\n\t"
+ /* Invalidate TLB way 5 */
+ "movi %0, 4\n\t"
+ "movi %1, 5\n"
+ "1:\n\t"
+ "iitlb %1\n\t"
+ "idtlb %1\n\t"
+ "add %1, %1, %6\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ /* Initialize TLB way 6 */
+ "movi %0, 7\n\t"
+ "addi %1, %9, 3\n\t"
+ "addi %2, %9, 6\n"
+ "1:\n\t"
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ "add %1, %1, %7\n\t"
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ "isync\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+ /* Complete way 6 initialization */
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ /* Invalidate temporary mapping */
+ "sub %0, %9, %7\n\t"
+ "iitlb %0\n\t"
+ "add %0, %0, %8\n\t"
+ "iitlb %0"
+ : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
+ "=&a"(tmp3)
+ : "a"(tmpaddr - vaddr),
+ "a"(paddr - vaddr),
+ "a"(SZ_128M), "a"(SZ_512M),
+ "a"(PAGE_SIZE),
+ "a"((tmpaddr + SZ_512M) & PAGE_MASK)
+ : "memory");
+ }
+#endif
+#endif
+ __asm__ __volatile__ ("movi a2, 0\n\t"
+ "wsr a2, icountlevel\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+#if XCHAL_HAVE_LOOPS
+ "wsr a2, lcount\n\t"
+#endif
+ "movi a2, 0x1f\n\t"
+ "wsr a2, ps\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2");
+ for (;;)
+ ;
+}
+
+void machine_restart(char * cmd)
+{
+ platform_restart();
+}
+
+void machine_halt(void)
+{
+ platform_halt();
+ while (1);
+}
+
+void machine_power_off(void)
+{
+ platform_power_off();
+ while (1);
+}
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Display some core information through /proc/cpuinfo.
+ */
+
+static int
+c_show(struct seq_file *f, void *slot)
+{
+ /* high-level stuff */
+ seq_printf(f, "CPU count\t: %u\n"
+ "CPU list\t: %*pbl\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "config ID\t: %08x:%08x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ num_online_cpus(),
+ cpumask_pr_args(cpu_online_mask),
+ XCHAL_BUILD_UNIQUE_ID,
+ xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE),
+ XCHAL_HAVE_BE ? "big" : "little",
+ ccount_freq/1000000,
+ (ccount_freq/10000) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
+ seq_puts(f, "flags\t\t: "
+#if XCHAL_HAVE_NMI
+ "nmi "
+#endif
+#if XCHAL_HAVE_DEBUG
+ "debug "
+# if XCHAL_HAVE_OCD
+ "ocd "
+# endif
+#endif
+#if XCHAL_HAVE_DENSITY
+ "density "
+#endif
+#if XCHAL_HAVE_BOOLEANS
+ "boolean "
+#endif
+#if XCHAL_HAVE_LOOPS
+ "loop "
+#endif
+#if XCHAL_HAVE_NSA
+ "nsa "
+#endif
+#if XCHAL_HAVE_MINMAX
+ "minmax "
+#endif
+#if XCHAL_HAVE_SEXT
+ "sext "
+#endif
+#if XCHAL_HAVE_CLAMPS
+ "clamps "
+#endif
+#if XCHAL_HAVE_MAC16
+ "mac16 "
+#endif
+#if XCHAL_HAVE_MUL16
+ "mul16 "
+#endif
+#if XCHAL_HAVE_MUL32
+ "mul32 "
+#endif
+#if XCHAL_HAVE_MUL32_HIGH
+ "mul32h "
+#endif
+#if XCHAL_HAVE_FP
+ "fpu "
+#endif
+#if XCHAL_HAVE_S32C1I
+ "s32c1i "
+#endif
+#if XCHAL_HAVE_EXCLUSIVE
+ "exclusive "
+#endif
+ "\n");
+
+ /* Registers. */
+ seq_printf(f,"physical aregs\t: %d\n"
+ "misc regs\t: %d\n"
+ "ibreak\t\t: %d\n"
+ "dbreak\t\t: %d\n",
+ XCHAL_NUM_AREGS,
+ XCHAL_NUM_MISC_REGS,
+ XCHAL_NUM_IBREAK,
+ XCHAL_NUM_DBREAK);
+
+
+ /* Interrupt. */
+ seq_printf(f,"num ints\t: %d\n"
+ "ext ints\t: %d\n"
+ "int levels\t: %d\n"
+ "timers\t\t: %d\n"
+ "debug level\t: %d\n",
+ XCHAL_NUM_INTERRUPTS,
+ XCHAL_NUM_EXTINTERRUPTS,
+ XCHAL_NUM_INTLEVELS,
+ XCHAL_NUM_TIMERS,
+ XCHAL_DEBUGLEVEL);
+
+ /* Cache */
+ seq_printf(f,"icache line size: %d\n"
+ "icache ways\t: %d\n"
+ "icache size\t: %d\n"
+ "icache flags\t: "
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ "lock "
+#endif
+ "\n"
+ "dcache line size: %d\n"
+ "dcache ways\t: %d\n"
+ "dcache size\t: %d\n"
+ "dcache flags\t: "
+#if XCHAL_DCACHE_IS_WRITEBACK
+ "writeback "
+#endif
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ "lock "
+#endif
+ "\n",
+ XCHAL_ICACHE_LINESIZE,
+ XCHAL_ICACHE_WAYS,
+ XCHAL_ICACHE_SIZE,
+ XCHAL_DCACHE_LINESIZE,
+ XCHAL_DCACHE_WAYS,
+ XCHAL_DCACHE_SIZE);
+
+ return 0;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+ return (*pos == 0) ? (void *)1 : NULL;
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(f, pos);
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op =
+{
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 000000000..5c01d7e70
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,533 @@
+/*
+ * arch/xtensa/kernel/signal.c
+ *
+ * Signal handling.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005, 2006 Tensilica Inc.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/resume_user_mode.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/ucontext.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/unistd.h>
+
+extern struct task_struct *coproc_owners[];
+
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t cp;
+#endif
+ } xtregs;
+ unsigned char retcode[6];
+ unsigned int window[4];
+};
+
+#if defined(USER_SUPPORT_WINDOWED)
+/*
+ * Flush register windows stored in pt_regs to stack.
+ * Returns 1 for errors.
+ */
+
+static int
+flush_window_regs_user(struct pt_regs *regs)
+{
+ const unsigned long ws = regs->windowstart;
+ const unsigned long wb = regs->windowbase;
+ unsigned long sp = 0;
+ unsigned long wm;
+ int err = 1;
+ int base;
+
+ /* Return if no other frames. */
+
+ if (regs->wmask == 1)
+ return 0;
+
+ /* Rotate windowmask and skip empty frames. */
+
+ wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
+ base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
+
+ /* For call8 or call12 frames, we need the previous stack pointer. */
+
+ if ((regs->wmask & 2) == 0)
+ if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
+ goto errout;
+
+ /* Spill frames to stack. */
+
+ while (base < XCHAL_NUM_AREGS / 4) {
+
+ int m = (wm >> base);
+ int inc = 0;
+
+		/* Save registers a4..a7 (call8) or a4..a11 (call12) */
+
+ if (m & 2) { /* call4 */
+ inc = 1;
+
+ } else if (m & 4) { /* call8 */
+ if (copy_to_user(&SPILL_SLOT_CALL8(sp, 4),
+ &regs->areg[(base + 1) * 4], 16))
+ goto errout;
+ inc = 2;
+
+ } else if (m & 8) { /* call12 */
+ if (copy_to_user(&SPILL_SLOT_CALL12(sp, 4),
+ &regs->areg[(base + 1) * 4], 32))
+ goto errout;
+ inc = 3;
+ }
+
+ /* Save current frame a0..a3 under next SP */
+
+ sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
+ if (copy_to_user(&SPILL_SLOT(sp, 0), &regs->areg[base * 4], 16))
+ goto errout;
+
+ /* Get current stack pointer for next loop iteration. */
+
+ sp = regs->areg[base * 4 + 1];
+ base += inc;
+ }
+
+ regs->wmask = 1;
+ regs->windowstart = 1 << wb;
+
+ return 0;
+
+errout:
+ return err;
+}
+#else
+static int
+flush_window_regs_user(struct pt_regs *regs)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Note: We don't copy double exception 'regs', we have to finish double exc.
+ * first before we return to signal handler! This dbl.exc.handler might cause
+ * another double exception, but I think we are fine as the situation is the
+ * same as if we had returned to the signal handler and got an interrupt
+ * immediately...
+ */
+
+static int
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(ps);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+#undef COPY
+
+ err |= flush_window_regs_user(regs);
+ err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
+ err |= __put_user(0, &sc->sc_xtregs);
+
+ if (err)
+ return err;
+
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_release_all(ti);
+ err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+ sizeof (frame->xtregs.cp));
+#endif
+ err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+ sizeof (xtregs_opt_t));
+ err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+ sizeof (xtregs_user_t));
+
+ err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
+
+ return err;
+}
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ unsigned int err = 0;
+ unsigned long ps;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+#undef COPY
+
+ /* All registers were flushed to stack. Start with a pristine frame. */
+
+ regs->wmask = 1;
+ regs->windowbase = 0;
+ regs->windowstart = 1;
+
+ regs->syscall = NO_SYSCALL; /* disable syscall checks */
+
+ /* For PS, restore only PS.CALLINC.
+ * Assume that all other bits are either the same as for the signal
+ * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+ */
+ err |= __get_user(ps, &sc->sc_ps);
+ regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
+
+ /* Additional corruption checks */
+
+ if ((regs->lcount > 0)
+ && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
+ err = 1;
+
+ err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
+
+ if (err)
+ return err;
+
+ /* The signal handler may have used coprocessors in which
+ * case they are still enabled. We disable them to force a
+ * reloading of the original task's CP state by the lazy
+ * context-switching mechanisms of CP exception handling.
+ * Also, we essentially discard any coprocessor state that the
+ * signal handler created. */
+
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(ti);
+ err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+ sizeof (frame->xtregs.cp));
+#endif
+ err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+ sizeof (xtregs_user_t));
+ err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+ sizeof (xtregs_opt_t));
+
+ return err;
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+asmlinkage long xtensa_rt_sigreturn(void)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ int ret;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ if (regs->depc > 64)
+ panic("rt_sigreturn in double exception!\n");
+
+ frame = (struct rt_sigframe __user *) regs->areg[1];
+
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, frame))
+ goto badframe;
+
+ ret = regs->areg[2];
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return ret;
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+gen_return_code(unsigned char *codemem)
+{
+ int err = 0;
+
+ /*
+ * The 12-bit immediate is really split up within the 24-bit MOVI
+	 * instruction. As long as the system call number above fits within
+	 * 8 bits, the following code works fine. See the Xtensa ISA for
+ * details.
+ */
+
+#if __NR_rt_sigreturn > 255
+# error Generating the MOVI instruction below breaks!
+#endif
+
+#ifdef __XTENSA_EB__ /* Big Endian version */
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0x0a, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x05, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+
+#elif defined __XTENSA_EL__ /* Little Endian version */
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0xa0, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x50, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+#else
+# error Must use compiler for Xtensa processors.
+#endif
+
+	/* Flush generated code out of the data cache and invalidate the icache copy */
+
+ if (err == 0) {
+ __invalidate_icache_range((unsigned long)codemem, 6UL);
+ __flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
+ }
+
+ return err;
+}
+
+
+static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0, sig = ksig->sig;
+ unsigned long sp, ra, tp, ps;
+ unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+ unsigned long handler_fdpic_GOT = 0;
+ unsigned int base;
+ bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
+ (current->personality & FDPIC_FUNCPTRS);
+
+ if (fdpic) {
+ unsigned long __user *fdpic_func_desc =
+ (unsigned long __user *)handler;
+ if (__get_user(handler, &fdpic_func_desc[0]) ||
+ __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
+ return -EFAULT;
+ }
+
+ sp = regs->areg[1];
+
+ if ((ksig->ka.sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
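+	/* Put the frame just below the stack pointer, aligned down to 16 bytes. */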
+ frame = (void *)((sp - sizeof(*frame)) & -16ul);
+
+ if (regs->depc > 64)
+ panic ("Double exception sys_sigreturn\n");
+
+ if (!access_ok(frame, sizeof(*frame))) {
+ return -EFAULT;
+ }
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+ }
+
+ /* Create the user context. */
+
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (ksig->ka.sa.sa_flags & SA_RESTORER) {
+ if (fdpic) {
+ unsigned long __user *fdpic_func_desc =
+ (unsigned long __user *)ksig->ka.sa.sa_restorer;
+
+ err |= __get_user(ra, fdpic_func_desc);
+ } else {
+ ra = (unsigned long)ksig->ka.sa.sa_restorer;
+ }
+ } else {
+
+ /* Create sys_rt_sigreturn syscall in stack frame */
+
+ err |= gen_return_code(frame->retcode);
+ ra = (unsigned long) frame->retcode;
+ }
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Create signal handler execution context.
+ * Return context not modified until this point.
+ */
+
+ /* Set up registers for signal handler; preserve the threadptr */
+ tp = regs->threadptr;
+ ps = regs->ps;
+ start_thread(regs, handler, (unsigned long)frame);
+
+ /* Set up a stack frame for a call4 if userspace uses windowed ABI */
+ if (ps & PS_WOE_MASK) {
+ base = 4;
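+		/*
+		 * The windowed ABI encodes the caller's window increment in
+		 * the top two bits of a0; mark the return address as a call4
+		 * frame (increment of 1).
+		 */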
+ regs->areg[base] =
+ (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
+ ps = (ps & ~(PS_CALLINC_MASK | PS_OWB_MASK)) |
+ (1 << PS_CALLINC_SHIFT);
+ } else {
+ base = 0;
+ regs->areg[base] = (unsigned long) ra;
+ }
+ regs->areg[base + 2] = (unsigned long) sig;
+ regs->areg[base + 3] = (unsigned long) &frame->info;
+ regs->areg[base + 4] = (unsigned long) &frame->uc;
+ regs->threadptr = tp;
+ regs->ps = ps;
+ if (fdpic)
+ regs->areg[base + 11] = handler_fdpic_GOT;
+
+ pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+ current->comm, current->pid, sig, frame, regs->pc);
+
+ return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init with a SIGKILL, not even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ task_pt_regs(current)->icountlevel = 0;
+
+ if (get_signal(&ksig)) {
+ int ret;
+
+ /* Are we from a system call? */
+
+ if (regs->syscall != NO_SYSCALL) {
+
+ /* If so, check system call restarting.. */
+
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ksig.ka.sa.sa_flags & SA_RESTART)) {
+ regs->areg[2] = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
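+				/*
+				 * Restart the syscall: put the syscall number
+				 * back into a2 and step the PC back over the
+				 * 3-byte syscall instruction.
+				 */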
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+
+ default:
+ /* nothing to do */
+ if (regs->areg[2] != 0)
+ break;
+ }
+ }
+
+ /* Whee! Actually deliver the signal. */
+ /* Set up the stack frame */
+ ret = setup_frame(&ksig, sigmask_to_save(), regs);
+ signal_setup_done(ret, &ksig, 0);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ task_pt_regs(current)->icountlevel = 1;
+
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->syscall != NO_SYSCALL) {
+ /* Restart the system call - no handlers present */
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = __NR_restart_syscall;
+ regs->pc -= 3;
+ break;
+ }
+ }
+
+ /* If there's no signal to deliver, we just restore the saved mask. */
+ restore_saved_sigmask();
+
+ if (test_thread_flag(TIF_SINGLESTEP))
+ task_pt_regs(current)->icountlevel = 1;
+ return;
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING) ||
+ test_thread_flag(TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME))
+ resume_user_mode_work(regs);
+}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 000000000..4dc109dd6
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,634 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+# error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+
+/* IPI (Inter-Processor Interrupt) */
+
+#define IPI_IRQ 0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+
+void ipi_init(void)
+{
+ unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+ if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL))
+ pr_err("Failed to request irq %u (ipi)\n", irq);
+}
+
+static inline unsigned int get_core_count(void)
+{
+ /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+ unsigned int syscfgid = get_er(SYSCFGID);
+ return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+	/* The low bits of SYSCFGID contain the core id. */
+ unsigned int core_id = get_er(SYSCFGID);
+ return core_id & 0x3fff;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned i;
+
+ for_each_possible_cpu(i)
+ set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+ unsigned i;
+ unsigned int ncpus = get_core_count();
+ unsigned int core_id = get_core_id();
+
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+ }
+
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+ BUG_ON(cpu != 0);
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ init_mmu();
+
+#ifdef CONFIG_DEBUG_MISC
+ if (boot_secondary_processors == 0) {
+ pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+ for (;;)
+ __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+ }
+
+ pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+#endif
+ /* Init EXCSAVE1 */
+
+ secondary_trap_init();
+
+ /* All kernel threads share the same mm context. */
+
+ mmget(mm);
+ mmgrab(mm);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ enter_lazy_tlb(mm, current);
+
+ trace_hardirqs_off();
+
+ calibrate_delay();
+
+ notify_cpu_starting(cpu);
+
+ secondary_init_irq();
+ local_timer_setup(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ complete(&cpu_running);
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+static void mx_cpu_start(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask | (1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned long ccount;
+ int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+#endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
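+	/*
+	 * Handshake with the secondary: publish the current ccount and wait
+	 * for the secondary's boot code to clear it. If it is still set when
+	 * the timeout expires, stall the core again and report failure.
+	 */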
+ for (i = 0; i < 2; ++i) {
+ do
+ ccount = get_ccount();
+ while (!ccount);
+
+ WRITE_ONCE(cpu_start_ccount, ccount);
+
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
+ mb();
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
+
+ if (ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret = 0;
+
+ if (cpu_asid_cache(cpu) == 0)
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+ start_info.stack = (unsigned long)task_pt_regs(idle);
+ wmb();
+
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
+ init_completion(&cpu_running);
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ if (ret)
+ pr_err("CPU %u failed to boot\n", cpu);
+
+ return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+#if XTENSA_HAVE_COPROCESSORS
+ /*
+ * Flush coprocessor contexts that are active on the current CPU.
+ */
+ local_coprocessors_flush_release_all();
+#endif
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ invalidate_page_directory();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+ smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or the wait times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+ }
+ pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ idle_task_exit();
+ local_irq_disable();
+ __asm__ __volatile__(
+ " movi a2, cpu_restart\n"
+ " jx a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE = 0,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_MAX
+};
+
+static const struct {
+ const char *short_text;
+ const char *long_text;
+} ipi_text[] = {
+ { .short_text = "RES", .long_text = "Rescheduling interrupts" },
+ { .short_text = "CAL", .long_text = "Function call interrupts" },
+ { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+ unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
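+/*
+ * Raise an IPI of the given type on every CPU in 'callmask' by writing the
+ * CPU bitmask to the corresponding MIPISET register.
+ */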
+static void send_ipi_message(const struct cpumask *callmask,
+ enum ipi_msg_type msg_id)
+{
+ int index;
+ unsigned long mask = 0;
+
+ for_each_cpu(index, callmask)
+ mask |= 1 << index;
+
+ set_er(mask, MIPISET(msg_id));
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ set_cpu_online(cpu, false);
+ machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+ for (;;) {
+ unsigned int msg;
+
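+		/* Read the pending IPI bits and write them back to acknowledge them. */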
+ msg = get_er(MIPICAUSE(cpu));
+ set_er(msg, MIPICAUSE(cpu));
+
+ if (!msg)
+ break;
+
+ if (msg & (1 << IPI_CALL_FUNC)) {
+ ++ipi->ipi_count[IPI_CALL_FUNC];
+ generic_smp_call_function_interrupt();
+ }
+
+ if (msg & (1 << IPI_RESCHEDULE)) {
+ ++ipi->ipi_count[IPI_RESCHEDULE];
+ scheduler_ipi();
+ }
+
+ if (msg & (1 << IPI_CPU_STOP)) {
+ ++ipi->ipi_count[IPI_CPU_STOP];
+ ipi_cpu_stop(cpu);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+ unsigned i;
+
+ for (i = 0; i < IPI_MAX; ++i) {
+ seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu",
+ per_cpu(ipi_data, cpu).ipi_count[i]);
+ seq_printf(p, " %s\n", ipi_text[i].long_text);
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ pr_debug("setup_profiling_timer %d\n", multiplier);
+ return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+ local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = addr,
+ };
+ on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+ local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = address,
+ .addr2 = pfn,
+ };
+ on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/* ------------------------------------------------------------------------- */
+
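+/*
+ * Perform dcache maintenance over the given range on every online CPU.
+ * Used to publish cpu_start_id to a secondary core during bring-up and
+ * shutdown.
+ */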
+static void ipi_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 000000000..7f7755cd2
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,275 @@
+/*
+ * Kernel and userspace stack tracing.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+#if IS_ENABLED(CONFIG_PERF_EVENTS)
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long windowstart = regs->windowstart;
+ unsigned long windowbase = regs->windowbase;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+ unsigned long pc = regs->pc;
+ struct stackframe frame;
+ int index;
+
+ if (!depth--)
+ return;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+
+ if (IS_ENABLED(CONFIG_USER_ABI_CALL0_ONLY) ||
+ (IS_ENABLED(CONFIG_USER_ABI_CALL0_PROBE) &&
+ !(regs->ps & PS_WOE_MASK)))
+ return;
+
+ /* Two steps:
+ *
+ * 1. Look through the register window for the
+ * previous PCs in the call trace.
+ *
+ * 2. Look on the stack.
+ */
+
+ /* Step 1. */
+ /* Rotate WINDOWSTART to move the bit corresponding to
+ * the current window to the bit #0.
+ */
+ windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+ /* Look for bits that are set, they correspond to
+ * valid windows.
+ */
+ for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+ if (windowstart & (1 << index)) {
+ /* Get the PC from a0 and a1. */
+ pc = MAKE_PC_FROM_RA(a0, pc);
+ /* Read a0 and a1 from the
+ * corresponding position in AREGs.
+ */
+ a0 = regs->areg[index * 4];
+ a1 = regs->areg[index * 4 + 1];
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+ }
+
+ /* Step 2. */
+	/* We are done with the register window; we need to
+ * look through the stack.
+ */
+ if (!depth)
+ return;
+
+ /* Start from the a1 register. */
+ /* a1 = regs->areg[1]; */
+ while (a0 != 0 && depth--) {
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Check if the region is OK to access. */
+ if (!access_ok(&SPILL_SLOT(a1, 0), 8))
+ return;
+ /* Copy a1, a0 from user space stack frame. */
+ if (__get_user(a0, &SPILL_SLOT(a1, 0)) ||
+ __get_user(a1, &SPILL_SLOT(a1, 1)))
+ return;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+ }
+}
+EXPORT_SYMBOL(xtensa_backtrace_user);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ?
+ regs->depc : regs->pc;
+ unsigned long sp_start, sp_end;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+
+ sp_start = a1 & ~(THREAD_SIZE - 1);
+ sp_end = sp_start + THREAD_SIZE;
+
+ /* Spill the register window to the stack first. */
+ spill_registers();
+
+ /* Read the stack frames one by one and create the PC
+ * from the a0 and a1 registers saved there.
+ */
+ while (a1 > sp_start && a1 < sp_end && depth--) {
+ struct stackframe frame;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (kernel_text_address(pc) && kfn(&frame, data))
+ return;
+
+ if (pc == (unsigned long)&common_exception_return) {
+ regs = (struct pt_regs *)a1;
+ if (user_mode(regs)) {
+ if (ufn == NULL)
+ return;
+ xtensa_backtrace_user(regs, depth, ufn, data);
+ return;
+ }
+ a0 = regs->areg[0];
+ a1 = regs->areg[1];
+ continue;
+ }
+
+ sp_start = a1;
+
+ pc = MAKE_PC_FROM_RA(a0, pc);
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
+ }
+}
+EXPORT_SYMBOL(xtensa_backtrace_kernel);
+
+#endif
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long a0, a1;
+ unsigned long sp_end;
+
+ a1 = (unsigned long)sp;
+ sp_end = ALIGN(a1, THREAD_SIZE);
+
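+	/* Spill the live register windows to the stack before walking it. */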
+ spill_registers();
+
+ while (a1 < sp_end) {
+ struct stackframe frame;
+
+ sp = (unsigned long *)a1;
+
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
+
+ if (a1 <= (unsigned long)sp)
+ break;
+
+ frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+ return;
+ }
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+ struct stack_trace_data *trace_data = data;
+ struct stack_trace *trace = trace_data->trace;
+
+ if (trace_data->skip) {
+ --trace_data->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+
+ trace->entries[trace->nr_entries++] = frame->pc;
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ struct stack_trace_data trace_data = {
+ .trace = trace,
+ .skip = trace->skip,
+ };
+ walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+ unsigned long addr;
+ unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+ struct return_addr_data *r = data;
+
+ if (r->skip) {
+ --r->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+ r->addr = frame->pc;
+ return 1;
+}
+
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
+unsigned long return_address(unsigned level)
+{
+ struct return_addr_data r = {
+ .skip = level,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 000000000..b3c2450d6
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,99 @@
+/*
+ * arch/xtensa/kernel/syscall.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+#include <linux/uaccess.h>
+#include <asm/syscall.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/shm.h>
+
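+/*
+ * The syscall table is built by expanding __SYSCALL() for each entry in the
+ * generated <asm/syscall_table.h>.
+ */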
+syscall_t sys_call_table[] /* FIXME __cacheline_aligned */= {
+#define __SYSCALL(nr, entry) (syscall_t)entry,
+#include <asm/syscall_table.h>
+};
+
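+/*
+ * Round 'addr' up to an SHMLBA boundary and add the cache colour of the
+ * file offset so that shared mappings of the same page share a colour.
+ */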
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
+{
+ unsigned long ret;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
+ if (err)
+ return err;
+ return (long)ret;
+}
+
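+/*
+ * The xtensa ABI passes 'advice' before the 64-bit offset (presumably to keep
+ * the 64-bit arguments aligned to register pairs); reorder them for the
+ * generic helper.
+ */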
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
+{
+ return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
+#ifdef CONFIG_MMU
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vmm;
+ struct vma_iterator vmi;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma_iter_init(&vmi, current->mm, addr);
+ for_each_vma(vmi, vmm) {
+ /* At this point: (addr < vmm->vm_end). */
+ if (addr + len <= vm_start_gap(vmm))
+ break;
+
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+
+ return addr;
+}
+#endif
diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile
new file mode 100644
index 000000000..b265e4bc1
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+$(shell mkdir -p $(uapi) $(kapi))
+
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
+
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h
+kapisyshdr-y += syscall_table.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
+
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+ @:
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000..52c94ab5c
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -0,0 +1,423 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for xtensa
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common spill sys_ni_syscall
+1 common xtensa sys_ni_syscall
+2 common available4 sys_ni_syscall
+3 common available5 sys_ni_syscall
+4 common available6 sys_ni_syscall
+5 common available7 sys_ni_syscall
+6 common available8 sys_ni_syscall
+7 common available9 sys_ni_syscall
+# File Operations
+8 common open sys_open
+9 common close sys_close
+10 common dup sys_dup
+11 common dup2 sys_dup2
+12 common read sys_read
+13 common write sys_write
+14 common select sys_select
+15 common lseek sys_lseek
+16 common poll sys_poll
+17 common _llseek sys_llseek
+18 common epoll_wait sys_epoll_wait
+19 common epoll_ctl sys_epoll_ctl
+20 common epoll_create sys_epoll_create
+21 common creat sys_creat
+22 common truncate sys_truncate
+23 common ftruncate sys_ftruncate
+24 common readv sys_readv
+25 common writev sys_writev
+26 common fsync sys_fsync
+27 common fdatasync sys_fdatasync
+28 common truncate64 sys_truncate64
+29 common ftruncate64 sys_ftruncate64
+30 common pread64 sys_pread64
+31 common pwrite64 sys_pwrite64
+32 common link sys_link
+33 common rename sys_rename
+34 common symlink sys_symlink
+35 common readlink sys_readlink
+36 common mknod sys_mknod
+37 common pipe sys_pipe
+38 common unlink sys_unlink
+39 common rmdir sys_rmdir
+40 common mkdir sys_mkdir
+41 common chdir sys_chdir
+42 common fchdir sys_fchdir
+43 common getcwd sys_getcwd
+44 common chmod sys_chmod
+45 common chown sys_chown
+46 common stat sys_newstat
+47 common stat64 sys_stat64
+48 common lchown sys_lchown
+49 common lstat sys_newlstat
+50 common lstat64 sys_lstat64
+51 common available51 sys_ni_syscall
+52 common fchmod sys_fchmod
+53 common fchown sys_fchown
+54 common fstat sys_newfstat
+55 common fstat64 sys_fstat64
+56 common flock sys_flock
+57 common access sys_access
+58 common umask sys_umask
+59 common getdents sys_getdents
+60 common getdents64 sys_getdents64
+61 common fcntl64 sys_fcntl64
+62 common fallocate sys_fallocate
+63 common fadvise64_64 xtensa_fadvise64_64
+64 common utime sys_utime32
+65 common utimes sys_utimes_time32
+66 common ioctl sys_ioctl
+67 common fcntl sys_fcntl
+68 common setxattr sys_setxattr
+69 common getxattr sys_getxattr
+70 common listxattr sys_listxattr
+71 common removexattr sys_removexattr
+72 common lsetxattr sys_lsetxattr
+73 common lgetxattr sys_lgetxattr
+74 common llistxattr sys_llistxattr
+75 common lremovexattr sys_lremovexattr
+76 common fsetxattr sys_fsetxattr
+77 common fgetxattr sys_fgetxattr
+78 common flistxattr sys_flistxattr
+79 common fremovexattr sys_fremovexattr
+# File Map / Shared Memory Operations
+80 common mmap2 sys_mmap_pgoff
+81 common munmap sys_munmap
+82 common mprotect sys_mprotect
+83 common brk sys_brk
+84 common mlock sys_mlock
+85 common munlock sys_munlock
+86 common mlockall sys_mlockall
+87 common munlockall sys_munlockall
+88 common mremap sys_mremap
+89 common msync sys_msync
+90 common mincore sys_mincore
+91 common madvise sys_madvise
+92 common shmget sys_shmget
+93 common shmat xtensa_shmat
+94 common shmctl sys_old_shmctl
+95 common shmdt sys_shmdt
+# Socket Operations
+96 common socket sys_socket
+97 common setsockopt sys_setsockopt
+98 common getsockopt sys_getsockopt
+99 common shutdown sys_shutdown
+100 common bind sys_bind
+101 common connect sys_connect
+102 common listen sys_listen
+103 common accept sys_accept
+104 common getsockname sys_getsockname
+105 common getpeername sys_getpeername
+106 common sendmsg sys_sendmsg
+107 common recvmsg sys_recvmsg
+108 common send sys_send
+109 common recv sys_recv
+110 common sendto sys_sendto
+111 common recvfrom sys_recvfrom
+112 common socketpair sys_socketpair
+113 common sendfile sys_sendfile
+114 common sendfile64 sys_sendfile64
+115 common sendmmsg sys_sendmmsg
+# Process Operations
+116 common clone sys_clone
+117 common execve sys_execve
+118 common exit sys_exit
+119 common exit_group sys_exit_group
+120 common getpid sys_getpid
+121 common wait4 sys_wait4
+122 common waitid sys_waitid
+123 common kill sys_kill
+124 common tkill sys_tkill
+125 common tgkill sys_tgkill
+126 common set_tid_address sys_set_tid_address
+127 common gettid sys_gettid
+128 common setsid sys_setsid
+129 common getsid sys_getsid
+130 common prctl sys_prctl
+131 common personality sys_personality
+132 common getpriority sys_getpriority
+133 common setpriority sys_setpriority
+134 common setitimer sys_setitimer
+135 common getitimer sys_getitimer
+136 common setuid sys_setuid
+137 common getuid sys_getuid
+138 common setgid sys_setgid
+139 common getgid sys_getgid
+140 common geteuid sys_geteuid
+141 common getegid sys_getegid
+142 common setreuid sys_setreuid
+143 common setregid sys_setregid
+144 common setresuid sys_setresuid
+145 common getresuid sys_getresuid
+146 common setresgid sys_setresgid
+147 common getresgid sys_getresgid
+148 common setpgid sys_setpgid
+149 common getpgid sys_getpgid
+150 common getppid sys_getppid
+151 common getpgrp sys_getpgrp
+# 152 was set_thread_area
+152 common reserved152 sys_ni_syscall
+# 153 was get_thread_area
+153 common reserved153 sys_ni_syscall
+154 common times sys_times
+155 common acct sys_acct
+156 common sched_setaffinity sys_sched_setaffinity
+157 common sched_getaffinity sys_sched_getaffinity
+158 common capget sys_capget
+159 common capset sys_capset
+160 common ptrace sys_ptrace
+161 common semtimedop sys_semtimedop_time32
+162 common semget sys_semget
+163 common semop sys_semop
+164 common semctl sys_old_semctl
+165 common available165 sys_ni_syscall
+166 common msgget sys_msgget
+167 common msgsnd sys_msgsnd
+168 common msgrcv sys_msgrcv
+169 common msgctl sys_old_msgctl
+170 common available170 sys_ni_syscall
+# File System
+171 common umount2 sys_umount
+172 common mount sys_mount
+173 common swapon sys_swapon
+174 common chroot sys_chroot
+175 common pivot_root sys_pivot_root
+176 common umount sys_oldumount
+177 common swapoff sys_swapoff
+178 common sync sys_sync
+179 common syncfs sys_syncfs
+180 common setfsuid sys_setfsuid
+181 common setfsgid sys_setfsgid
+182 common sysfs sys_sysfs
+183 common ustat sys_ustat
+184 common statfs sys_statfs
+185 common fstatfs sys_fstatfs
+186 common statfs64 sys_statfs64
+187 common fstatfs64 sys_fstatfs64
+# System
+188 common setrlimit sys_setrlimit
+189 common getrlimit sys_getrlimit
+190 common getrusage sys_getrusage
+191 common futex sys_futex_time32
+192 common gettimeofday sys_gettimeofday
+193 common settimeofday sys_settimeofday
+194 common adjtimex sys_adjtimex_time32
+195 common nanosleep sys_nanosleep_time32
+196 common getgroups sys_getgroups
+197 common setgroups sys_setgroups
+198 common sethostname sys_sethostname
+199 common setdomainname sys_setdomainname
+200 common syslog sys_syslog
+201 common vhangup sys_vhangup
+202 common uselib sys_uselib
+203 common reboot sys_reboot
+204 common quotactl sys_quotactl
+# 205 was old nfsservctl
+205 common nfsservctl sys_ni_syscall
+206 common _sysctl sys_ni_syscall
+207 common bdflush sys_ni_syscall
+208 common uname sys_newuname
+209 common sysinfo sys_sysinfo
+210 common init_module sys_init_module
+211 common delete_module sys_delete_module
+212 common sched_setparam sys_sched_setparam
+213 common sched_getparam sys_sched_getparam
+214 common sched_setscheduler sys_sched_setscheduler
+215 common sched_getscheduler sys_sched_getscheduler
+216 common sched_get_priority_max sys_sched_get_priority_max
+217 common sched_get_priority_min sys_sched_get_priority_min
+218 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+219 common sched_yield sys_sched_yield
+222 common available222 sys_ni_syscall
+# Signal Handling
+223 common restart_syscall sys_restart_syscall
+224 common sigaltstack sys_sigaltstack
+225 common rt_sigreturn xtensa_rt_sigreturn
+226 common rt_sigaction sys_rt_sigaction
+227 common rt_sigprocmask sys_rt_sigprocmask
+228 common rt_sigpending sys_rt_sigpending
+229 common rt_sigtimedwait sys_rt_sigtimedwait_time32
+230 common rt_sigqueueinfo sys_rt_sigqueueinfo
+231 common rt_sigsuspend sys_rt_sigsuspend
+# Message
+232 common mq_open sys_mq_open
+233 common mq_unlink sys_mq_unlink
+234 common mq_timedsend sys_mq_timedsend_time32
+235 common mq_timedreceive sys_mq_timedreceive_time32
+236 common mq_notify sys_mq_notify
+237 common mq_getsetattr sys_mq_getsetattr
+238 common available238 sys_ni_syscall
+239 common io_setup sys_io_setup
+# IO
+240 common io_destroy sys_io_destroy
+241 common io_submit sys_io_submit
+242 common io_getevents sys_io_getevents_time32
+243 common io_cancel sys_io_cancel
+244 common clock_settime sys_clock_settime32
+245 common clock_gettime sys_clock_gettime32
+246 common clock_getres sys_clock_getres_time32
+247 common clock_nanosleep sys_clock_nanosleep_time32
+# Timer
+248 common timer_create sys_timer_create
+249 common timer_delete sys_timer_delete
+250 common timer_settime sys_timer_settime32
+251 common timer_gettime sys_timer_gettime32
+252 common timer_getoverrun sys_timer_getoverrun
+# System
+253 common reserved253 sys_ni_syscall
+254 common lookup_dcookie sys_lookup_dcookie
+255 common available255 sys_ni_syscall
+256 common add_key sys_add_key
+257 common request_key sys_request_key
+258 common keyctl sys_keyctl
+259 common available259 sys_ni_syscall
+260 common readahead sys_readahead
+261 common remap_file_pages sys_remap_file_pages
+262 common migrate_pages sys_migrate_pages
+263 common mbind sys_mbind
+264 common get_mempolicy sys_get_mempolicy
+265 common set_mempolicy sys_set_mempolicy
+266 common unshare sys_unshare
+267 common move_pages sys_move_pages
+268 common splice sys_splice
+269 common tee sys_tee
+270 common vmsplice sys_vmsplice
+271 common available271 sys_ni_syscall
+272 common pselect6 sys_pselect6_time32
+273 common ppoll sys_ppoll_time32
+274 common epoll_pwait sys_epoll_pwait
+275 common epoll_create1 sys_epoll_create1
+276 common inotify_init sys_inotify_init
+277 common inotify_add_watch sys_inotify_add_watch
+278 common inotify_rm_watch sys_inotify_rm_watch
+279 common inotify_init1 sys_inotify_init1
+280 common getcpu sys_getcpu
+281 common kexec_load sys_ni_syscall
+282 common ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get
+284 common set_robust_list sys_set_robust_list
+285 common get_robust_list sys_get_robust_list
+286 common available286 sys_ni_syscall
+287 common available287 sys_ni_syscall
+# Relative File Operations
+288 common openat sys_openat
+289 common mkdirat sys_mkdirat
+290 common mknodat sys_mknodat
+291 common unlinkat sys_unlinkat
+292 common renameat sys_renameat
+293 common linkat sys_linkat
+294 common symlinkat sys_symlinkat
+295 common readlinkat sys_readlinkat
+296 common utimensat sys_utimensat_time32
+297 common fchownat sys_fchownat
+298 common futimesat sys_futimesat_time32
+299 common fstatat64 sys_fstatat64
+300 common fchmodat sys_fchmodat
+301 common faccessat sys_faccessat
+302 common available302 sys_ni_syscall
+303 common available303 sys_ni_syscall
+304 common signalfd sys_signalfd
+# 305 was timerfd
+306 common eventfd sys_eventfd
+307 common recvmmsg sys_recvmmsg_time32
+308 common setns sys_setns
+309 common signalfd4 sys_signalfd4
+310 common dup3 sys_dup3
+311 common pipe2 sys_pipe2
+312 common timerfd_create sys_timerfd_create
+313 common timerfd_settime sys_timerfd_settime32
+314 common timerfd_gettime sys_timerfd_gettime32
+315 common available315 sys_ni_syscall
+316 common eventfd2 sys_eventfd2
+317 common preadv sys_preadv
+318 common pwritev sys_pwritev
+319 common available319 sys_ni_syscall
+320 common fanotify_init sys_fanotify_init
+321 common fanotify_mark sys_fanotify_mark
+322 common process_vm_readv sys_process_vm_readv
+323 common process_vm_writev sys_process_vm_writev
+324 common name_to_handle_at sys_name_to_handle_at
+325 common open_by_handle_at sys_open_by_handle_at
+326 common sync_file_range2 sys_sync_file_range2
+327 common perf_event_open sys_perf_event_open
+328 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+329 common clock_adjtime sys_clock_adjtime32
+330 common prlimit64 sys_prlimit64
+331 common kcmp sys_kcmp
+332 common finit_module sys_finit_module
+333 common accept4 sys_accept4
+334 common sched_setattr sys_sched_setattr
+335 common sched_getattr sys_sched_getattr
+336 common renameat2 sys_renameat2
+337 common seccomp sys_seccomp
+338 common getrandom sys_getrandom
+339 common memfd_create sys_memfd_create
+340 common bpf sys_bpf
+341 common execveat sys_execveat
+342 common userfaultfd sys_userfaultfd
+343 common membarrier sys_membarrier
+344 common mlock2 sys_mlock2
+345 common copy_file_range sys_copy_file_range
+346 common preadv2 sys_preadv2
+347 common pwritev2 sys_pwritev2
+348 common pkey_mprotect sys_pkey_mprotect
+349 common pkey_alloc sys_pkey_alloc
+350 common pkey_free sys_pkey_free
+351 common statx sys_statx
+352 common rseq sys_rseq
+# 353 through 402 are unassigned to sync up with generic numbers
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 000000000..16b8a6273
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,214 @@
+/*
+ * arch/xtensa/kernel/time.c
+ *
+ * Timer and clock support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/of_clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/sched_clock.h>
+
+#include <asm/timex.h>
+#include <asm/platform.h>
+
+unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
+
+static u64 ccount_read(struct clocksource *cs)
+{
+ return (u64)get_ccount();
+}
+
+static u64 notrace ccount_sched_clock_read(void)
+{
+ return get_ccount();
+}
+
+static struct clocksource ccount_clocksource = {
+ .name = "ccount",
+ .rating = 200,
+ .read = ccount_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+struct ccount_timer {
+ struct clock_event_device evt;
+ int irq_enabled;
+ char name[24];
+};
+
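+/*
+ * Program CCOMPARE for the next event 'delta' ticks from now. If CCOUNT
+ * has already passed the programmed value by the time it is re-read, the
+ * unsigned difference wraps around and exceeds 'delta', so -ETIME is
+ * returned and the clockevents core can retry.
+ */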
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ unsigned long flags, next;
+ int ret = 0;
+
+ local_irq_save(flags);
+ next = get_ccount() + delta;
+ set_linux_timer(next);
+ if (next - get_ccount() > delta)
+ ret = -ETIME;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+static int ccount_timer_shutdown(struct clock_event_device *evt)
+{
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ if (timer->irq_enabled) {
+ disable_irq_nosync(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ return 0;
+}
+
+static int ccount_timer_set_oneshot(struct clock_event_device *evt)
+{
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ if (!timer->irq_enabled) {
+ enable_irq(evt->irq);
+ timer->irq_enabled = 1;
+ }
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
+ .evt = {
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = ccount_timer_set_next_event,
+ .set_state_shutdown = ccount_timer_shutdown,
+ .set_state_oneshot = ccount_timer_set_oneshot,
+ .tick_resume = ccount_timer_set_oneshot,
+ },
+};
+
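+/*
+ * Writing the CCOMPARE register clears the pending timer interrupt, so
+ * rewriting it with its current value acknowledges the interrupt before
+ * the clockevent handler programs the next event.
+ */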
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
+
+ set_linux_timer(get_linux_timer());
+ evt->event_handler(evt);
+
+ /* Allow platform to do something useful (Wdog). */
+ platform_heartbeat();
+
+ return IRQ_HANDLED;
+}
+
+void local_timer_setup(unsigned cpu)
+{
+ struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+ struct clock_event_device *clockevent = &timer->evt;
+
+ timer->irq_enabled = 1;
+ snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+ clockevent->name = timer->name;
+ clockevent->cpumask = cpumask_of(cpu);
+ clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ if (WARN(!clockevent->irq, "error: can't map timer irq"))
+ return;
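+	/* Min/max event deltas are in ccount ticks: 0xf and 0xffffffff. */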
+ clockevents_config_and_register(clockevent, ccount_freq,
+ 0xf, 0xffffffff);
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+#ifdef CONFIG_OF
+static void __init calibrate_ccount(void)
+{
+ struct device_node *cpu;
+ struct clk *clk;
+
+ cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+ if (cpu) {
+ clk = of_clk_get(cpu, 0);
+ of_node_put(cpu);
+ if (!IS_ERR(clk)) {
+ ccount_freq = clk_get_rate(clk);
+ return;
+ } else {
+ pr_warn("%s: CPU input clock not found\n",
+ __func__);
+ }
+ } else {
+ pr_warn("%s: CPU node not found in the device tree\n",
+ __func__);
+ }
+
+ platform_calibrate_ccount();
+}
+#else
+static inline void calibrate_ccount(void)
+{
+ platform_calibrate_ccount();
+}
+#endif
+#endif
+
+void __init time_init(void)
+{
+ int irq;
+
+ of_clk_init(NULL);
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+ pr_info("Calibrating CPU frequency ");
+ calibrate_ccount();
+ pr_cont("%d.%02d MHz\n",
+ (int)ccount_freq / 1000000,
+ (int)(ccount_freq / 10000) % 100);
+#else
+ ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
+#endif
+ WARN(!ccount_freq,
+ "%s: CPU clock frequency is not set up correctly\n",
+ __func__);
+ clocksource_register_hz(&ccount_clocksource, ccount_freq);
+ local_timer_setup(0);
+ irq = this_cpu_ptr(&ccount_timer)->evt.irq;
+ if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+ sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+ timer_probe();
+}
+
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void calibrate_delay(void)
+{
+ loops_per_jiffy = ccount_freq / HZ;
+ pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+ loops_per_jiffy / (1000000 / HZ),
+ (loops_per_jiffy / (10000 / HZ)) % 100);
+}
+#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 000000000..9e9ade20a
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,595 @@
+/*
+ * arch/xtensa/kernel/traps.c
+ *
+ * Exception handling.
+ *
+ * Derived from code with the following copyrights:
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Modified for R3000 by Paul M. Antoine, 1995, 1996
+ * Complete output from die() by Ulf Carlsson, 1998
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Essentially rewritten for the Xtensa architecture port.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
+
+#include <asm/stacktrace.h>
+#include <asm/ptrace.h>
+#include <asm/timex.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/hw_breakpoint.h>
+
+/*
+ * Machine specific interrupt handlers
+ */
+
+static void do_illegal_instruction(struct pt_regs *regs);
+static void do_div0(struct pt_regs *regs);
+static void do_interrupt(struct pt_regs *regs);
+#if XTENSA_FAKE_NMI
+static void do_nmi(struct pt_regs *regs);
+#endif
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+static void do_unaligned_user(struct pt_regs *regs);
+#endif
+static void do_multihit(struct pt_regs *regs);
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs);
+#endif
+static void do_debug(struct pt_regs *regs);
+
+/*
+ * The vector table must be preceded by a save area (which
+ * implies it must be in RAM, unless one places RAM immediately
+ * before a ROM and puts the vector at the start of the ROM (!))
+ */
+
+#define KRNL 0x01
+#define USER 0x02
+
+#define COPROCESSOR(x) \
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
+
+typedef struct {
+ int cause;
+ int fast;
+ void* handler;
+} dispatch_init_table_t;
+
+static dispatch_init_table_t __initdata dispatch_init_table[] = {
+
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+{ EXCCAUSE_ILLEGAL_INSTRUCTION, USER, fast_illegal_instruction_user },
+#endif
+{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
+{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
+{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
+/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
+{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
+#ifdef SUPPORT_WINDOWED
+{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
+#endif
+{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 },
+/* EXCCAUSE_PRIVILEGED unhandled */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef CONFIG_XTENSA_UNALIGNED_USER
+{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
+#endif
+{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
+{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
+#endif
+#ifdef CONFIG_MMU
+{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_PFAULT
+{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
+{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
+#endif
+/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
+#if XTENSA_HAVE_COPROCESSOR(0)
+COPROCESSOR(0),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(1)
+COPROCESSOR(1),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(2)
+COPROCESSOR(2),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(3)
+COPROCESSOR(3),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(4)
+COPROCESSOR(4),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(5)
+COPROCESSOR(5),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(6)
+COPROCESSOR(6),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(7)
+COPROCESSOR(7),
+#endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
+#endif
+{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
+{ -1, -1, 0 }
+
+};
+
+/* The exception table <exc_table> serves two functions:
+ * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
+ * 2. it is a temporary memory buffer for the exception handlers.
+ */
+
+DEFINE_PER_CPU(struct exc_table, exc_table);
+DEFINE_PER_CPU(struct debug_table, debug_table);
+
+void die(const char*, struct pt_regs*, long);
+
+static inline void
+__die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * Unhandled Exceptions. Kill user task or panic if in kernel space.
+ */
+
+void do_unhandled(struct pt_regs *regs)
+{
+ __die_if_kernel("Caught unhandled exception - should not happen",
+ regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process */
+ pr_info_ratelimited("Caught unhandled exception in '%s' "
+ "(pid = %d, pc = %#010lx) - should not happen\n"
+ "\tEXCCAUSE is %ld\n",
+ current->comm, task_pid_nr(current), regs->pc,
+ regs->exccause);
+ force_sig(SIGILL);
+}
+
+/*
+ * Multi-hit exception. This is fatal!
+ */
+
+static void do_multihit(struct pt_regs *regs)
+{
+ die("Caught multihit exception", regs, SIGKILL);
+}
+
+/*
+ * IRQ handler.
+ */
+
+#if XTENSA_FAKE_NMI
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+ IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."
+
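+/*
+ * BUG if anything other than the profiling interrupt itself, or an
+ * interrupt below the profiling level, is pending and enabled while the
+ * fake NMI is being handled (see the warnings above).
+ */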
+static inline void check_valid_nmi(void)
+{
+ unsigned intread = xtensa_get_sr(interrupt);
+ unsigned intenable = xtensa_get_sr(intenable);
+
+ BUG_ON(intread & intenable &
+ ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+ XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+ BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+static void do_nmi(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ nmi_enter();
+ ++*this_cpu_ptr(&nmi_count);
+ check_valid_nmi();
+ xtensa_pmu_irq_handler(0, NULL);
+ nmi_exit();
+ set_irq_regs(old_regs);
+}
+#endif
+
+static void do_interrupt(struct pt_regs *regs)
+{
+ static const unsigned int_level_mask[] = {
+ 0,
+ XCHAL_INTLEVEL1_MASK,
+ XCHAL_INTLEVEL2_MASK,
+ XCHAL_INTLEVEL3_MASK,
+ XCHAL_INTLEVEL4_MASK,
+ XCHAL_INTLEVEL5_MASK,
+ XCHAL_INTLEVEL6_MASK,
+ XCHAL_INTLEVEL7_MASK,
+ };
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned unhandled = ~0u;
+
+ irq_enter();
+
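+	/*
+	 * 'unhandled' provides round-robin fairness within an interrupt
+	 * level: the bit of each serviced IRQ is cleared, still-set pending
+	 * IRQs are preferred on the next pass, and a level's bits are only
+	 * refilled once all of its pending IRQs have been serviced.
+	 */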
+ for (;;) {
+ unsigned intread = xtensa_get_sr(interrupt);
+ unsigned intenable = xtensa_get_sr(intenable);
+ unsigned int_at_level = intread & intenable;
+ unsigned level;
+
+ for (level = LOCKLEVEL; level > 0; --level) {
+ if (int_at_level & int_level_mask[level]) {
+ int_at_level &= int_level_mask[level];
+ if (int_at_level & unhandled)
+ int_at_level &= unhandled;
+ else
+ unhandled |= int_level_mask[level];
+ break;
+ }
+ }
+
+ if (level == 0)
+ break;
+
+ /* clear lowest pending irq in the unhandled mask */
+ unhandled ^= (int_at_level & -int_at_level);
+ do_IRQ(__ffs(int_at_level), regs);
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
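+/*
+ * Check whether the illegal instruction at regs->pc is followed by the
+ * ASCII marker 'DIV0' used to report integer division by zero. The marker
+ * is looked for at both pc + 2 and pc + 3, as the faulting instruction may
+ * be either two or three bytes long.
+ */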
+static bool check_div0(struct pt_regs *regs)
+{
+ static const u8 pattern[] = {'D', 'I', 'V', '0'};
+ const u8 *p;
+ u8 buf[5];
+
+ if (user_mode(regs)) {
+ if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
+ return false;
+ p = buf;
+ } else {
+ p = (const u8 *)regs->pc + 2;
+ }
+
+ return memcmp(p, pattern, sizeof(pattern)) == 0 ||
+ memcmp(p + 1, pattern, sizeof(pattern)) == 0;
+}
+
+/*
+ * Illegal instruction. Fatal if in kernel space.
+ */
+
+static void do_illegal_instruction(struct pt_regs *regs)
+{
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
+ /*
+	 * When a call0 application encounters an illegal instruction, the
+	 * fast exception handler will attempt to set PS.WOE and retry the
+	 * failing instruction.
+	 * If we get here we know that the instruction is also illegal with
+	 * PS.WOE set, so it's not related to the windowed option, hence
+	 * PS.WOE may be cleared.
+ */
+ if (regs->pc == current_thread_info()->ps_woe_fix_addr)
+ regs->ps &= ~PS_WOE_MASK;
+#endif
+ if (check_div0(regs)) {
+ do_div0(regs);
+ return;
+ }
+
+ __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process. */
+
+ pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+ current->comm, task_pid_nr(current), regs->pc);
+ force_sig(SIGILL);
+}
+
+static void do_div0(struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
+}
+
+/*
+ * Handle unaligned memory accesses from user space. Kill task.
+ *
+ * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
+ * memory accesses caused by user space.
+ */
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+static void do_unaligned_user(struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled unaligned exception in kernel",
+ regs, SIGKILL);
+
+ current->thread.bad_vaddr = regs->excvaddr;
+ current->thread.error_code = -3;
+ pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
+ "(pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm,
+ task_pid_nr(current), regs->pc);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
+}
+#endif
+
+#if XTENSA_HAVE_COPROCESSORS
+static void do_coprocessor(struct pt_regs *regs)
+{
+ coprocessor_flush_release_all(current_thread_info());
+}
+#endif
+
+/* Handle debug events.
+ * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
+ * preemption disabled to avoid rescheduling and keep mapping of hardware
+ * breakpoint structures to debug registers intact, so that
+ * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
+ */
+static void do_debug(struct pt_regs *regs)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret = check_hw_breakpoint(regs);
+
+ preempt_enable();
+ if (ret == 0)
+ return;
+#endif
+ __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGTRAP signal to current process */
+
+ force_sig(SIGTRAP);
+}
+
+
+#define set_handler(type, cause, handler) \
+ do { \
+ unsigned int cpu; \
+ \
+ for_each_possible_cpu(cpu) \
+ per_cpu(exc_table, cpu).type[cause] = (handler);\
+ } while (0)
+
+/* Set exception C handler - for temporary use when probing exceptions */
+
+xtensa_exception_handler *
+__init trap_set_handler(int cause, xtensa_exception_handler *handler)
+{
+ void *previous = per_cpu(exc_table, 0).default_handler[cause];
+
+ set_handler(default_handler, cause, handler);
+ return previous;
+}
+
+
+static void trap_init_excsave(void)
+{
+ xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
+}
+
+static void trap_init_debug(void)
+{
+ unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);
+
+ this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
+ __asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
+ :: "a"(debugsave));
+}
+
+/*
+ * Initialize dispatch tables.
+ *
+ * The exception vectors are stored compressed in the __init section in the
+ * dispatch_init_table. This function initializes the following three tables
+ * from that compressed table:
+ * - fast user:		first dispatch table for user exceptions
+ * - fast kernel:	first dispatch table for kernel exceptions
+ * - default C-handler:	C-handler called by the default fast handler.
+ *
+ * See vectors.S for more details.
+ */
+
+void __init trap_init(void)
+{
+ int i;
+
+ /* Setup default vectors. */
+
+ for (i = 0; i < EXCCAUSE_N; i++) {
+ set_handler(fast_user_handler, i, user_exception);
+ set_handler(fast_kernel_handler, i, kernel_exception);
+ set_handler(default_handler, i, do_unhandled);
+ }
+
+ /* Setup specific handlers. */
+
+ for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
+ int fast = dispatch_init_table[i].fast;
+ int cause = dispatch_init_table[i].cause;
+ void *handler = dispatch_init_table[i].handler;
+
+ if (fast == 0)
+ set_handler(default_handler, cause, handler);
+ if ((fast & USER) != 0)
+ set_handler(fast_user_handler, cause, handler);
+ if ((fast & KRNL) != 0)
+ set_handler(fast_kernel_handler, cause, handler);
+ }
+
+ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
+ trap_init_excsave();
+ trap_init_debug();
+}
+
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+ trap_init_excsave();
+ trap_init_debug();
+}
+#endif
+
+/*
+ * This function dumps the current valid window frame and other base registers.
+ */
+
+void show_regs(struct pt_regs * regs)
+{
+ int i;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ for (i = 0; i < 16; i++) {
+ if ((i % 8) == 0)
+ pr_info("a%02d:", i);
+ pr_cont(" %08lx", regs->areg[i]);
+ }
+ pr_cont("\n");
+ pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+ regs->pc, regs->ps, regs->depc, regs->excvaddr);
+ pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+ regs->lbeg, regs->lend, regs->lcount, regs->sar);
+ if (user_mode(regs))
+ pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+ regs->windowbase, regs->windowstart, regs->wmask,
+ regs->syscall);
+}
+
+static int show_trace_cb(struct stackframe *frame, void *data)
+{
+ const char *loglvl = data;
+
+ if (kernel_text_address(frame->pc))
+ printk("%s [<%08lx>] %pB\n",
+ loglvl, frame->pc, (void *)frame->pc);
+ return 0;
+}
+
+static void show_trace(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
+{
+ if (!sp)
+ sp = stack_pointer(task);
+
+ printk("%sCall Trace:\n", loglvl);
+ walk_stackframe(sp, show_trace_cb, (void *)loglvl);
+}
+
+#define STACK_DUMP_ENTRY_SIZE 4
+#define STACK_DUMP_LINE_SIZE 32
+static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ size_t len, off = 0;
+
+ if (!sp)
+ sp = stack_pointer(task);
+
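+	/*
+	 * Dump at most kstack_depth_to_print entries, but never past the
+	 * end of the THREAD_SIZE-aligned stack area containing sp.
+	 */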
+ len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
+ kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
+
+ printk("%sStack:\n", loglvl);
+ while (off < len) {
+ u8 line[STACK_DUMP_LINE_SIZE];
+ size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
+ STACK_DUMP_LINE_SIZE : len - off;
+
+ __memcpy(line, (u8 *)sp + off, line_len);
+ print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
+ STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+ line, line_len, false);
+ off += STACK_DUMP_LINE_SIZE;
+ }
+ show_trace(task, sp, loglvl);
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void __noreturn die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+ const char *pr = "";
+
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+
+ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
+ show_regs(regs);
+ if (!user_mode(regs))
+ show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
+
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ make_task_dead(err);
+}
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 000000000..1073fe4a5
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,805 @@
+/*
+ * arch/xtensa/kernel/vectors.S
+ *
+ * This file contains all exception vectors (user, kernel, and double),
+ * as well as the window vectors (overflow and underflow), and the debug
+ * vector. These are the primary vectors executed by the processor if an
+ * exception occurs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+/*
+ * We use a two-level table approach. The user and kernel exception vectors
+ * use a first-level dispatch table to dispatch the exception to a registered
+ * fast handler or the default handler, if no fast handler was registered.
+ * The default handler sets up a C-stack and dispatches the exception to a
+ * registered C handler in the second-level dispatch table.
+ *
+ * Fast handler entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * The value for PT_DEPC saved to stack also functions as a boolean to
+ * indicate that the exception is either a double or a regular exception:
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: Neither the kernel nor the user exception handler generate literals.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/asmmacro.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/vectors.h>
+
+#define WINDOW_VECTORS_SIZE 0x180
+
+
+/*
+ * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
+ *
+ * We get here when an exception occurred while we were in userland.
+ * We switch to the kernel stack and jump to the first level handler
+ * associated to the exception cause.
+ *
+ * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
+ * decremented by PT_USER_SIZE.
+ */
+
+ .section .UserExceptionVector.text, "ax"
+
+ENTRY(_UserExceptionVector)
+
+ xsr a3, excsave1 # save a3 and get dispatch table
+ wsr a2, depc # save a2
+ l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, exccause # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ xsr a3, excsave1 # restore a3 and dispatch table
+ jx a0
+
+ENDPROC(_UserExceptionVector)
+
+/*
+ * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
+ *
+ * We get this exception when we were already in kernel space.
+ * We decrement the current stack pointer (kernel) by PT_KERNEL_SIZE and
+ * jump to the first-level handler associated with the exception cause.
+ *
+ * Note: we need to preserve space for the spill region.
+ */
+
+ .section .KernelExceptionVector.text, "ax"
+
+ENTRY(_KernelExceptionVector)
+
+ xsr a3, excsave1 # save a3, and get dispatch table
+ wsr a2, depc # save a2
+ addi a2, a1, -16 - PT_KERNEL_SIZE # adjust stack pointer
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, exccause # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
+ xsr a3, excsave1 # restore a3 and dispatch table
+ jx a0
+
+ENDPROC(_KernelExceptionVector)
+
+/*
+ * Double exception vector (Exceptions with PS.EXCM == 1)
+ * We get this exception when another exception occurs while we are
+ * already in an exception, such as window overflow/underflow exception,
+ * or 'expected' exceptions, for example memory exception when we were trying
+ * to read data from an invalid address in user space.
+ *
+ * Note that this vector is never invoked for level-1 interrupts, because such
+ * interrupts are disabled (masked) when PS.EXCM is set.
+ *
+ * We decode the exception and take the appropriate action. However, the
+ * double exception vector is much more careful, because a lot more error
+ * cases go through the double exception vector than through the user and
+ * kernel exception vectors.
+ *
+ * Occasionally, the kernel expects a double exception to occur. This usually
+ * happens when accessing user-space memory with the user's permissions
+ * (l32e/s32e instructions). The kernel state, though, is not always suitable
+ * for immediate transfer of control to handle_double, where "normal" exception
+ * processing occurs. Also in kernel mode, TLB misses can occur if accessing
+ * vmalloc memory, possibly requiring repair in a double exception handler.
+ *
+ * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
+ * a boolean variable and a pointer to a fixup routine. If the variable
+ * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
+ * zero indicates to use the default kernel/user exception handler.
+ * There is only one exception: when the value is identical to the exc_table
+ * label, the kernel is in trouble. This mechanism is used to protect critical
+ * sections, mainly when the handler writes to the stack to assert the stack
+ * pointer is valid. Once the fixup/default handler leaves that area, the
+ * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
+ *
+ * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
+ * nonzero address of a fixup routine before they could cause a double
+ * exception and reset it before they return.
+ *
+ * Some other things to take care of when a fast exception handler doesn't
+ * specify a particular fixup handler but wants to use the default handlers:
+ *
+ * - The original stack pointer (in a1) must not be modified. The fast
+ * exception handler should only use a2 as the stack pointer.
+ *
+ * - If the fast handler manipulates the stack pointer (in a2), it has to
+ * register a valid fixup handler and cannot use the default handlers.
+ *
+ * - The handler can use any other generic register from a3 to a15, but it
+ * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
+ *
+ * - These registers must be saved before a double exception can occur.
+ *
+ * - If we ever implement handling signals while in double exceptions, the
+ * number of registers a fast handler has saved (excluding a0 and a1) must
+ * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. )
+ *
+ * The fixup handlers are special handlers:
+ *
+ * - Fixup entry conditions differ from regular exceptions:
+ *
+ * a0: DEPC
+ * a1: a1
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable
+ * depc: a0
+ * excsave_1: a3
+ *
+ * - When the kernel enters the fixup handler, it still assumes it is in a
+ * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
+ * The fixup handler, therefore, has to re-register itself as the fixup
+ * handler before it returns from the double exception.
+ *
+ * - Fixup handler can share the same exception frame with the fast handler.
+ * The kernel stack pointer is not changed when entering the fixup handler.
+ *
+ * - Fixup handlers can jump to the default kernel and user exception
+ *   handlers. Before it jumps, though, it has to set up an exception frame
+ *   on stack. Because the default handler resets the registered fixup handler,
+ *   the fixup handler must make sure that the default handler returns to
+ * it instead of the exception address, so it can re-register itself as
+ * the fixup handler.
+ *
+ * In case of a critical condition where the kernel cannot recover, we jump
+ * to unrecoverable_exception with the following entry conditions.
+ * All registers a0...a15 are unchanged from the last exception, except:
+ *
+ * a0: last address before we jumped to the unrecoverable_exception.
+ * excsave_1: a0
+ *
+ *
+ * See the handle_alloca_user and spill_registers routines for example clients.
+ *
+ * FIXME: Note: we currently don't allow signal handling coming from a double
+ * exception, so the item marked with (*) is not required.
+ */
+
+ .section .DoubleExceptionVector.text, "ax"
+
+ENTRY(_DoubleExceptionVector)
+
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+
+ /* Check for kernel double exception (usually fatal). */
+
+ rsr a2, ps
+ _bbsi.l a2, PS_UM_BIT, 1f
+ j .Lksp
+
+ .align 4
+ .literal_position
+1:
+ /* Check if we are currently handling a window exception. */
+ /* Note: We don't need to indicate that we enter a critical section. */
+
+ xsr a0, depc # get DEPC, save a0
+
+#ifdef SUPPORT_WINDOWED
+ movi a2, WINDOW_VECTORS_VADDR
+ _bltu a0, a2, .Lfixup
+ addi a2, a2, WINDOW_VECTORS_SIZE
+ _bgeu a0, a2, .Lfixup
+
+ /* Window overflow/underflow exception. Get stack pointer. */
+
+ l32i a2, a3, EXC_TABLE_KSTK
+
+ /* Check for overflow/underflow exception, jump if overflow. */
+
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
+
+ /*
+ * Restart window underflow exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = excsave_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * underflow exception. Therefore, we change window base to the value
+ * before we entered the window underflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing depc (in a0).
+ * Note: We can trash the current window frame (a0...a3) and depc!
+ */
+_DoubleExceptionVector_WindowUnderflow:
+ xsr a3, excsave1
+ wsr a2, depc # save stack pointer temporarily
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ wsr a0, windowbase
+ rsync
+
+ /* We are now in the previous window frame. Save registers again. */
+
+ xsr a2, depc # save a2 and get stack pointer
+ s32i a0, a2, PT_AREG0
+ xsr a3, excsave1
+ rsr a0, exccause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3
+ xsr a3, excsave1
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+#else
+ j .Lfixup
+#endif
+
+ /*
+ * We only allow the ITLB miss exception if we are in kernel space.
+ * All other exceptions are unexpected and thus unrecoverable!
+ */
+
+#ifdef CONFIG_MMU
+ .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+ rsr a3, exccause
+ beqi a3, EXCCAUSE_ITLB_MISS, 1f
+ addi a3, a3, -EXCCAUSE_DTLB_MISS
+ bnez a3, .Lunrecoverable
+1: movi a3, fast_second_level_miss_double_kernel
+ jx a3
+#else
+.equ .Lksp, .Lunrecoverable
+#endif
+
+ /* Critical! We can't handle this situation. PANIC! */
+
+ .extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+
+.Lunrecoverable:
+ rsr a3, excsave1
+ wsr a0, excsave1
+ call0 unrecoverable_exception
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
+
+ /* Enter critical section. */
+
+ l32i a2, a3, EXC_TABLE_FIXUP
+ s32i a3, a3, EXC_TABLE_FIXUP
+ beq a2, a3, .Lunrecoverable_fixup # critical section
+ beqz a2, .Ldflt # no handler was registered
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
+
+ jx a2
+
+.Ldflt: /* Get stack pointer. */
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ addi a2, a2, -PT_USER_SIZE
+
+ /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
+
+ s32i a0, a2, PT_DEPC
+ l32i a0, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+ s32i a0, a2, PT_AREG0
+
+ /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
+
+ rsr a0, exccause
+ addx4 a0, a0, a3
+ xsr a3, excsave1
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+#ifdef SUPPORT_WINDOWED
+ /*
+ * Restart window OVERFLOW exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = EXCSAVE_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * overflow exception. Therefore, we change window base to the value
+ * before we entered the window overflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing DEPC (in a0).
+ *
+ * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+ * can clobber depc.
+ *
+ * The tricky part here is that overflow8 and overflow12 handlers
+ * save a0, then clobber a0. To restart the handler, we have to restore
+ * a0 if the double exception was past the point where a0 was clobbered.
+ *
+ * To keep things simple, we take advantage of the fact all overflow
+ * handlers save a0 in their very first instruction. If DEPC was past
+ * that instruction, we can safely restore a0 from where it was saved
+ * on the stack.
+ *
+ * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
+ */
+_DoubleExceptionVector_WindowOverflow:
+ extui a2, a0, 0, 6 # get offset into 64-byte vector handler
+ beqz a2, 1f # if at start of vector, don't restore
+
+ addi a0, a0, -128
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
+
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ bbsi.l a0, 7, 2f
+
+ /*
+ * Restore a0 as saved by _WindowOverflow8().
+ */
+
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
+
+2:
+ /*
+ * Restore a0 as saved by _WindowOverflow12().
+ */
+
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+1:
+ /*
+ * Restore WindowBase while leaving all address registers restored.
+ * We have to use ROTW for this, because WSR.WINDOWBASE requires
+ * an address register (which would prevent restore).
+ *
+	 * Window Base goes from 0 ... 7 (Modulo 8)
+ * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s
+ */
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ beqi a0, 1, .L1pane
+ beqi a0, 3, .L3pane
+
+ rsr a0, depc
+ rotw -2
+
+ /*
+ * We are now in the user code's original window frame.
+ * Process the exception as a user exception as if it was
+ * taken by the user code.
+ *
+ * This is similar to the user exception vector,
+ * except that PT_DEPC isn't set to EXCCAUSE.
+ */
+1:
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+ rsr a0, exccause
+
+ s32i a0, a2, PT_DEPC
+
+_DoubleExceptionVector_handle_exception:
+ addi a0, a0, -EXCCAUSE_UNALIGNED
+ beqz a0, 2f
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED
+ xsr a3, excsave1
+ jx a0
+2:
+ movi a0, user_exception
+ xsr a3, excsave1
+ jx a0
+
+.L1pane:
+ rsr a0, depc
+ rotw -1
+ j 1b
+
+.L3pane:
+ rsr a0, depc
+ rotw -3
+ j 1b
+#endif
+
+ENDPROC(_DoubleExceptionVector)
+
+#ifdef SUPPORT_WINDOWED
+
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ __XTENSA_HANDLER
+ .literal_position
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+ /* Need to preserve a0 value here to be able to handle exception
+ * that may occur on a0 reload from stack. It may occur because
+ * TLB miss handler may not be atomic and pointer to page table
+ * may be lost before we get here. There are no free registers,
+ * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+ */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+#endif
+
+/*
+ * Debug interrupt vector
+ *
+ * There is not much space here, so simply jump to another handler.
+ * EXCSAVE[DEBUGLEVEL] has been set to that handler.
+ */
+
+ .section .DebugInterruptVector.text, "ax"
+
+ENTRY(_DebugInterruptVector)
+
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ s32i a0, a3, DT_DEBUG_SAVE
+ l32i a0, a3, DT_DEBUG_EXCEPTION
+ jx a0
+
+ENDPROC(_DebugInterruptVector)
+
+
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space. With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets the current IRQ level in PS.INTLEVEL
+ * and preserves it for the duration of IRQ handling.
+ */
+
+ .macro irq_entry_level level
+
+ .if XCHAL_EXCM_LEVEL >= \level
+ .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+ wsr a0, excsave2
+ rsr a0, epc\level
+ wsr a0, epc1
+ .if \level <= LOCKLEVEL
+ movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ .else
+ movi a0, EXCCAUSE_MAPPED_NMI
+ .endif
+ wsr a0, exccause
+ rsr a0, eps\level
+ # branch to user or kernel vector
+ j _SimulateUserKernelVectorException
+ .endif
+
+ .endm
+
+ irq_entry_level 2
+ irq_entry_level 3
+ irq_entry_level 4
+ irq_entry_level 5
+ irq_entry_level 6
+
+#if XCHAL_EXCM_LEVEL >= 2
+ /*
+ * Continuation of medium priority interrupt dispatch code.
+ * On entry here, a0 contains PS, and EPC2 contains saved a0:
+ */
+ __XTENSA_HANDLER
+ .align 4
+_SimulateUserKernelVectorException:
+ addi a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
+ wsr a0, ps
+#endif
+ bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
+ xsr a0, excsave2 # restore a0
+ j _KernelExceptionVector # simulate kernel vector exception
+1: xsr a0, excsave2 # restore a0
+ j _UserExceptionVector # simulate user vector exception
+#endif
+
+
+/* Window overflow and underflow handlers.
+ * The handlers must be 64 bytes apart, first starting with the underflow
+ * handlers underflow-4 to underflow-12, then the overflow handlers
+ * overflow-4 to overflow-12.
+ *
+ * Note: We rerun the underflow handlers if we hit an exception, so
+ * we try to access any page that would cause a page fault early.
+ */
+
+#define ENTRY_ALIGN64(name) \
+ .globl name; \
+ .align 64; \
+ name:
+
+ .section .WindowVectors.text, "ax"
+
+
+#ifdef SUPPORT_WINDOWED
+
+/* 4-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow4)
+
+ s32e a0, a5, -16
+ s32e a1, a5, -12
+ s32e a2, a5, -8
+ s32e a3, a5, -4
+ rfwo
+
+ENDPROC(_WindowOverflow4)
+
+/* 4-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow4)
+
+ l32e a0, a5, -16
+ l32e a1, a5, -12
+ l32e a2, a5, -8
+ l32e a3, a5, -4
+ rfwu
+
+ENDPROC(_WindowUnderflow4)
+
+/* 8-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow8)
+
+ s32e a0, a9, -16
+ l32e a0, a1, -12
+ s32e a2, a9, -8
+ s32e a1, a9, -12
+ s32e a3, a9, -4
+ s32e a4, a0, -32
+ s32e a5, a0, -28
+ s32e a6, a0, -24
+ s32e a7, a0, -20
+ rfwo
+
+ENDPROC(_WindowOverflow8)
+
+/* 8-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow8)
+
+ l32e a1, a9, -12
+ l32e a0, a9, -16
+ l32e a7, a1, -12
+ l32e a2, a9, -8
+ l32e a4, a7, -32
+ l32e a3, a9, -4
+ l32e a5, a7, -28
+ l32e a6, a7, -24
+ l32e a7, a7, -20
+ rfwu
+
+ENDPROC(_WindowUnderflow8)
+
+/* 12-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow12)
+
+ s32e a0, a13, -16
+ l32e a0, a1, -12
+ s32e a1, a13, -12
+ s32e a2, a13, -8
+ s32e a3, a13, -4
+ s32e a4, a0, -48
+ s32e a5, a0, -44
+ s32e a6, a0, -40
+ s32e a7, a0, -36
+ s32e a8, a0, -32
+ s32e a9, a0, -28
+ s32e a10, a0, -24
+ s32e a11, a0, -20
+ rfwo
+
+ENDPROC(_WindowOverflow12)
+
+/* 12-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow12)
+
+ l32e a1, a13, -12
+ l32e a0, a13, -16
+ l32e a11, a1, -12
+ l32e a2, a13, -8
+ l32e a4, a11, -48
+ l32e a8, a11, -32
+ l32e a3, a13, -4
+ l32e a5, a11, -44
+ l32e a6, a11, -40
+ l32e a7, a11, -36
+ l32e a9, a11, -28
+ l32e a10, a11, -24
+ l32e a11, a11, -20
+ rfwu
+
+ENDPROC(_WindowUnderflow12)
+
+#endif
+
+ .text
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..965a3952c
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,380 @@
+/*
+ * arch/xtensa/kernel/vmlinux.lds.S
+ *
+ * Xtensa linker script
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#include <asm/core.h>
+#include <asm/vectors.h>
+
+OUTPUT_ARCH(xtensa)
+ENTRY(_start)
+
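+/* 'jiffies' aliases the low 32 bits of jiffies_64 (at offset 4 on big-endian). */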
+#ifdef __XTENSA_EB__
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+/* Note: In the following macros, it would be nice to specify only the
+ vector name and section kind and construct "sym" and "section" using
+ CPP concatenation, but that does not work reliably. Concatenating a
+ string with "." produces an invalid token. CPP will not print a
+ warning because it thinks this is an assembly file, but it leaves
+ them as multiple tokens and there may or may not be whitespace
+ between them. */
+
+/* Macro for a relocation entry */
+
+#define RELOCATE_ENTRY(sym, section) \
+ LONG(sym ## _start); \
+ LONG(sym ## _end); \
+ LONG(LOADADDR(section))
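+
+/*
+ * Each RELOCATE_ENTRY emits a (start, end, load address) triple that head.S
+ * walks at boot to copy the corresponding vector to its run-time address.
+ */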
+
+#if !defined(CONFIG_VECTORS_ADDR) && XCHAL_HAVE_VECBASE
+#define MERGED_VECTORS 1
+#else
+#define MERGED_VECTORS 0
+#endif
+
+/*
+ * Macro to define a section for a vector. When MERGED_VECTORS is 0
+ * code for every vector is located with other init data. At startup
+ * time head.S copies code for every vector to its final position according
+ * to description recorded in the corresponding RELOCATE_ENTRY.
+ */
+
+#define SECTION_VECTOR4(sym, section, addr, prevsec) \
+ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
+ { \
+ . = ALIGN(4); \
+ sym ## _start = ABSOLUTE(.); \
+ *(section) \
+ sym ## _end = ABSOLUTE(.); \
+ }
+
+#define SECTION_VECTOR2(section, addr) \
+ . = addr; \
+ *(section)
+
+/*
+ * Mapping of input sections to output sections when linking.
+ */
+
+SECTIONS
+{
+ . = KERNELOFFSET;
+ /* .text section */
+
+ _text = .;
+ _stext = .;
+
+ .text :
+ {
+ /* The HEAD_TEXT section must be the first section! */
+ HEAD_TEXT
+
+#if MERGED_VECTORS
+ . = ALIGN(PAGE_SIZE);
+ _vecbase = .;
+
+#ifdef SUPPORT_WINDOWED
+ SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR2 (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR2 (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR2 (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR2 (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
+#endif
+ SECTION_VECTOR2 (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
+ SECTION_VECTOR2 (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
+ SECTION_VECTOR2 (.UserExceptionVector.text, USER_VECTOR_VADDR)
+ SECTION_VECTOR2 (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+
+ *(.exception.text)
+#endif
+
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ }
+ _etext = .;
+ PROVIDE (etext = .);
+
+ . = ALIGN(16);
+
+ RO_DATA(4096)
+
+ /* Data section */
+
+#ifdef CONFIG_XIP_KERNEL
+ INIT_TEXT_SECTION(PAGE_SIZE)
+#else
+ _sdata = .;
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ /* Initialization code and data: */
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ .init.data :
+ {
+ INIT_DATA
+ }
+#endif
+
+ .init.rodata :
+ {
+ . = ALIGN(0x4);
+ __tagtable_begin = .;
+ *(.taglist)
+ __tagtable_end = .;
+
+ . = ALIGN(16);
+ __boot_reloc_table_start = ABSOLUTE(.);
+
+#if !MERGED_VECTORS
+#ifdef SUPPORT_WINDOWED
+ RELOCATE_ENTRY(_WindowVectors_text,
+ .WindowVectors.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 2
+ RELOCATE_ENTRY(_Level2InterruptVector_text,
+ .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ RELOCATE_ENTRY(_Level3InterruptVector_text,
+ .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ RELOCATE_ENTRY(_Level4InterruptVector_text,
+ .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ RELOCATE_ENTRY(_Level5InterruptVector_text,
+ .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ RELOCATE_ENTRY(_Level6InterruptVector_text,
+ .Level6InterruptVector.text);
+#endif
+ RELOCATE_ENTRY(_KernelExceptionVector_text,
+ .KernelExceptionVector.text);
+ RELOCATE_ENTRY(_UserExceptionVector_text,
+ .UserExceptionVector.text);
+ RELOCATE_ENTRY(_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text);
+ RELOCATE_ENTRY(_DebugInterruptVector_text,
+ .DebugInterruptVector.text);
+ RELOCATE_ENTRY(_exception_text,
+ .exception.text);
+#endif
+#ifdef CONFIG_XIP_KERNEL
+ RELOCATE_ENTRY(_xip_data, .data);
+ RELOCATE_ENTRY(_xip_init_data, .init.data);
+#endif
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
+ RELOCATE_ENTRY(_SecondaryResetVector_text,
+ .SecondaryResetVector.text);
+#endif
+
+ __boot_reloc_table_end = ABSOLUTE(.) ;
+
+ INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+ INIT_CALLS
+ CON_INITCALL
+ INIT_RAM_FS
+ }
+
+ PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
+
+	/*
+	 * A dummy output section so that LAST (defined just below) always
+	 * refers to a section with a known load address, anchoring the
+	 * load-address chain used for the vector sections below.
+	 */
+
+ . = ALIGN(4);
+ .dummy : { LONG(0) }
+
+#undef LAST
+#define LAST .dummy
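+
+/* From here on, LAST always names the vector output section emitted most
+   recently, so each SECTION_VECTOR4 below packs its load address
+   word-aligned right after the previous section while keeping its virtual
+   address at the fixed vector location. */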
+
+#if !MERGED_VECTORS
+	/* The vectors are relocated to their real positions at startup time */
+
+#ifdef SUPPORT_WINDOWED
+ SECTION_VECTOR4 (_WindowVectors_text,
+ .WindowVectors.text,
+ WINDOW_VECTORS_VADDR,
+ LAST)
+#undef LAST
+#define LAST .WindowVectors.text
+#endif
+ SECTION_VECTOR4 (_DebugInterruptVector_text,
+ .DebugInterruptVector.text,
+ DEBUG_VECTOR_VADDR,
+ LAST)
+#undef LAST
+#define LAST .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR4 (_Level2InterruptVector_text,
+ .Level2InterruptVector.text,
+ INTLEVEL2_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR4 (_Level3InterruptVector_text,
+ .Level3InterruptVector.text,
+ INTLEVEL3_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR4 (_Level4InterruptVector_text,
+ .Level4InterruptVector.text,
+ INTLEVEL4_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR4 (_Level5InterruptVector_text,
+ .Level5InterruptVector.text,
+ INTLEVEL5_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR4 (_Level6InterruptVector_text,
+ .Level6InterruptVector.text,
+ INTLEVEL6_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level6InterruptVector.text
+#endif
+ SECTION_VECTOR4 (_KernelExceptionVector_text,
+ .KernelExceptionVector.text,
+ KERNEL_VECTOR_VADDR,
+ LAST)
+#undef LAST
+ SECTION_VECTOR4 (_UserExceptionVector_text,
+ .UserExceptionVector.text,
+ USER_VECTOR_VADDR,
+ .KernelExceptionVector.text)
+ SECTION_VECTOR4 (_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text,
+ DOUBLEEXC_VECTOR_VADDR,
+ .UserExceptionVector.text)
+#define LAST .DoubleExceptionVector.text
+
+#endif
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
+
+ SECTION_VECTOR4 (_SecondaryResetVector_text,
+ .SecondaryResetVector.text,
+ RESET_VECTOR1_VADDR,
+ LAST)
+#undef LAST
+#define LAST .SecondaryResetVector.text
+
+#endif
+#if !MERGED_VECTORS
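+	/* The empty address argument below leaves .exception.text without a
+	   fixed vector address; it is simply placed at the current location
+	   counter, with its load address packed after LAST as usual. */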
+ SECTION_VECTOR4 (_exception_text,
+ .exception.text,
+ ,
+ LAST)
+#undef LAST
+#define LAST .exception.text
+
+#endif
+ . = (LOADADDR(LAST) + SIZEOF(LAST) + 3) & ~ 3;
+
+ .dummy1 : AT(ADDR(.dummy1)) { LONG(0) }
+ . = ALIGN(PAGE_SIZE);
+
+#ifndef CONFIG_XIP_KERNEL
+ __init_end = .;
+
+ BSS_SECTION(0, 8192, 0)
+#endif
+
+ _end = .;
+
+#ifdef CONFIG_XIP_KERNEL
+ . = CONFIG_XIP_DATA_ADDR;
+
+ _xip_start = .;
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET \
+ (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy1) + SIZEOF(.dummy1) + 3) & ~ 3)
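+
+/* Redefining LOAD_OFFSET makes the generic section macros such as RW_DATA,
+   which emit AT(ADDR(...) - LOAD_OFFSET), put the load addresses of the
+   XIP data back in the flash image just after .dummy1 while the virtual
+   addresses run from CONFIG_XIP_DATA_ADDR. */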
+
+ _xip_data_start = .;
+ _sdata = .;
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+ _xip_data_end = .;
+
+ /* Initialization data: */
+
+ STRUCT_ALIGN();
+
+ _xip_init_data_start = .;
+ __init_begin = .;
+ .init.data :
+ {
+ INIT_DATA
+ }
+ _xip_init_data_end = .;
+ __init_end = .;
+ BSS_SECTION(0, 8192, 0)
+
+ _xip_end = .;
+
+#undef LOAD_OFFSET
+#endif
+
+ DWARF_DEBUG
+
+ .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
+ .xt.insn 0 : { KEEP(*(.xt.insn .xt.insn.* .gnu.linkonce.x*)) }
+ .xt.lit 0 : { KEEP(*(.xt.lit .xt.lit.* .gnu.linkonce.p*)) }
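+
+	/* .xt.prop, .xt.insn and .xt.lit carry Xtensa-specific property and
+	   literal information for the toolchain and debuggers; like the
+	   DWARF sections above they are kept for tools rather than loaded
+	   at run time. */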
+
+ /* Sections to be discarded */
+ DISCARDS
+}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 000000000..17a7ef86f
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,123 @@
+/*
+ * arch/xtensa/kernel/xtensa_ksyms.c
+ *
+ * Export Xtensa-specific functions for loadable modules.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/in6.h>
+
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/ftrace.h>
+#ifdef CONFIG_BLK_DEV_FD
+#include <asm/floppy.h>
+#endif
+#ifdef CONFIG_NET
+#include <net/checksum.h>
+#endif /* CONFIG_NET */
+
+
+/*
+ * String functions
+ */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#ifdef CONFIG_ARCH_HAS_STRNCPY_FROM_USER
+EXPORT_SYMBOL(__strncpy_user);
+#endif
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * gcc internal math functions
+ */
+extern long long __ashrdi3(long long, int);
+extern long long __ashldi3(long long, int);
+extern long long __bswapdi2(long long);
+extern int __bswapsi2(int);
+extern long long __lshrdi3(long long, int);
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
+extern unsigned long long __umulsidi3(unsigned int, unsigned int);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__bswapdi2);
+EXPORT_SYMBOL(__bswapsi2);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__umulsidi3);
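+
+/*
+ * These are the usual libgcc-style helpers: gcc may emit calls to them for
+ * 64-bit shifts and byte swaps and for 32-bit multiply/divide/modulo on
+ * cores that lack the corresponding instructions, so they must be exported
+ * for modules.  A plain "a / b" on two unsigned ints in a module may, for
+ * instance, compile into a call to __udivsi3(a, b).
+ */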
+
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_and_4);
+
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_or_4);
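+
+/*
+ * The two stubs above exist only to satisfy references the compiler can
+ * emit for these __sync builtins; they are never expected to actually run,
+ * hence the BUG().
+ */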
+
+/*
+ * Networking support
+ */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+
+/*
+ * Architecture-specific symbols
+ */
+EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
+
+/*
+ * Kernel hacking ...
+ */
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+// FIXME EXPORT_SYMBOL(screen_info);
+#endif
+
+extern long common_exception_return;
+EXPORT_SYMBOL(common_exception_return);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif