path: root/arch/xtensa
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
commit 76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/xtensa
parent Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- arch/xtensa/Kconfig | 586
-rw-r--r-- arch/xtensa/Kconfig.debug | 33
-rw-r--r-- arch/xtensa/Makefile | 107
-rw-r--r-- arch/xtensa/boot/.gitignore | 2
-rw-r--r-- arch/xtensa/boot/Makefile | 52
-rw-r--r-- arch/xtensa/boot/boot-elf/.gitignore | 1
-rw-r--r-- arch/xtensa/boot/boot-elf/Makefile | 34
-rw-r--r-- arch/xtensa/boot/boot-elf/boot.lds.S | 43
-rw-r--r-- arch/xtensa/boot/boot-elf/bootstrap.S | 78
-rw-r--r-- arch/xtensa/boot/boot-redboot/Makefile | 35
-rw-r--r-- arch/xtensa/boot/boot-redboot/boot.ld | 59
-rw-r--r-- arch/xtensa/boot/boot-redboot/bootstrap.S | 251
-rw-r--r-- arch/xtensa/boot/dts/Makefile | 17
-rw-r--r-- arch/xtensa/boot/dts/csp.dts | 55
-rw-r--r-- arch/xtensa/boot/dts/kc705.dts | 31
-rw-r--r-- arch/xtensa/boot/dts/kc705_nommu.dts | 18
-rw-r--r-- arch/xtensa/boot/dts/lx200mx.dts | 17
-rw-r--r-- arch/xtensa/boot/dts/lx60.dts | 12
-rw-r--r-- arch/xtensa/boot/dts/ml605.dts | 12
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi | 29
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi | 29
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi | 21
-rw-r--r-- arch/xtensa/boot/dts/xtfpga.dtsi | 137
-rw-r--r-- arch/xtensa/boot/lib/.gitignore | 3
-rw-r--r-- arch/xtensa/boot/lib/Makefile | 31
-rw-r--r-- arch/xtensa/boot/lib/zmem.c | 80
-rw-r--r-- arch/xtensa/configs/audio_kc705_defconfig | 141
-rw-r--r-- arch/xtensa/configs/cadence_csp_defconfig | 121
-rw-r--r-- arch/xtensa/configs/common_defconfig | 54
-rw-r--r-- arch/xtensa/configs/generic_kc705_defconfig | 129
-rw-r--r-- arch/xtensa/configs/iss_defconfig | 34
-rw-r--r-- arch/xtensa/configs/nommu_kc705_defconfig | 130
-rw-r--r-- arch/xtensa/configs/smp_lx200_defconfig | 134
-rw-r--r-- arch/xtensa/include/asm/Kbuild | 30
-rw-r--r-- arch/xtensa/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/xtensa/include/asm/asm-uaccess.h | 157
-rw-r--r-- arch/xtensa/include/asm/asmmacro.h | 194
-rw-r--r-- arch/xtensa/include/asm/atomic.h | 205
-rw-r--r-- arch/xtensa/include/asm/barrier.h | 21
-rw-r--r-- arch/xtensa/include/asm/bitops.h | 237
-rw-r--r-- arch/xtensa/include/asm/bootparam.h | 50
-rw-r--r-- arch/xtensa/include/asm/bugs.h | 18
-rw-r--r-- arch/xtensa/include/asm/cache.h | 34
-rw-r--r-- arch/xtensa/include/asm/cacheasm.h | 214
-rw-r--r-- arch/xtensa/include/asm/cacheflush.h | 182
-rw-r--r-- arch/xtensa/include/asm/checksum.h | 254
-rw-r--r-- arch/xtensa/include/asm/cmpxchg.h | 162
-rw-r--r-- arch/xtensa/include/asm/coprocessor.h | 177
-rw-r--r-- arch/xtensa/include/asm/current.h | 38
-rw-r--r-- arch/xtensa/include/asm/delay.h | 75
-rw-r--r-- arch/xtensa/include/asm/dma.h | 62
-rw-r--r-- arch/xtensa/include/asm/elf.h | 207
-rw-r--r-- arch/xtensa/include/asm/fixmap.h | 85
-rw-r--r-- arch/xtensa/include/asm/flat.h | 24
-rw-r--r-- arch/xtensa/include/asm/ftrace.h | 40
-rw-r--r-- arch/xtensa/include/asm/futex.h | 127
-rw-r--r-- arch/xtensa/include/asm/highmem.h | 100
-rw-r--r-- arch/xtensa/include/asm/hw_breakpoint.h | 61
-rw-r--r-- arch/xtensa/include/asm/hw_irq.h | 14
-rw-r--r-- arch/xtensa/include/asm/initialize_mmu.h | 216
-rw-r--r-- arch/xtensa/include/asm/io.h | 85
-rw-r--r-- arch/xtensa/include/asm/irq.h | 42
-rw-r--r-- arch/xtensa/include/asm/irqflags.h | 82
-rw-r--r-- arch/xtensa/include/asm/kasan.h | 39
-rw-r--r-- arch/xtensa/include/asm/kmem_layout.h | 75
-rw-r--r-- arch/xtensa/include/asm/linkage.h | 9
-rw-r--r-- arch/xtensa/include/asm/mmu.h | 22
-rw-r--r-- arch/xtensa/include/asm/mmu_context.h | 159
-rw-r--r-- arch/xtensa/include/asm/module.h | 20
-rw-r--r-- arch/xtensa/include/asm/mxregs.h | 46
-rw-r--r-- arch/xtensa/include/asm/nommu_context.h | 34
-rw-r--r-- arch/xtensa/include/asm/page.h | 199
-rw-r--r-- arch/xtensa/include/asm/pci-bridge.h | 88
-rw-r--r-- arch/xtensa/include/asm/pci.h | 53
-rw-r--r-- arch/xtensa/include/asm/perf_event.h | 4
-rw-r--r-- arch/xtensa/include/asm/pgalloc.h | 85
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 454
-rw-r--r-- arch/xtensa/include/asm/platform.h | 105
-rw-r--r-- arch/xtensa/include/asm/processor.h | 250
-rw-r--r-- arch/xtensa/include/asm/ptrace.h | 110
-rw-r--r-- arch/xtensa/include/asm/regs.h | 117
-rw-r--r-- arch/xtensa/include/asm/segment.h | 16
-rw-r--r-- arch/xtensa/include/asm/serial.h | 18
-rw-r--r-- arch/xtensa/include/asm/shmparam.h | 21
-rw-r--r-- arch/xtensa/include/asm/signal.h | 23
-rw-r--r-- arch/xtensa/include/asm/smp.h | 43
-rw-r--r-- arch/xtensa/include/asm/spinlock.h | 199
-rw-r--r-- arch/xtensa/include/asm/spinlock_types.h | 21
-rw-r--r-- arch/xtensa/include/asm/stackprotector.h | 40
-rw-r--r-- arch/xtensa/include/asm/stacktrace.h | 44
-rw-r--r-- arch/xtensa/include/asm/string.h | 140
-rw-r--r-- arch/xtensa/include/asm/switch_to.h | 22
-rw-r--r-- arch/xtensa/include/asm/syscall.h | 27
-rw-r--r-- arch/xtensa/include/asm/sysmem.h | 19
-rw-r--r-- arch/xtensa/include/asm/thread_info.h | 135
-rw-r--r-- arch/xtensa/include/asm/timex.h | 70
-rw-r--r-- arch/xtensa/include/asm/tlb.h | 47
-rw-r--r-- arch/xtensa/include/asm/tlbflush.h | 208
-rw-r--r-- arch/xtensa/include/asm/traps.h | 120
-rw-r--r-- arch/xtensa/include/asm/types.h | 23
-rw-r--r-- arch/xtensa/include/asm/uaccess.h | 311
-rw-r--r-- arch/xtensa/include/asm/ucontext.h | 22
-rw-r--r-- arch/xtensa/include/asm/unaligned.h | 29
-rw-r--r-- arch/xtensa/include/asm/unistd.h | 25
-rw-r--r-- arch/xtensa/include/asm/user.h | 20
-rw-r--r-- arch/xtensa/include/asm/vectors.h | 125
-rw-r--r-- arch/xtensa/include/asm/vga.h | 19
-rw-r--r-- arch/xtensa/include/uapi/asm/Kbuild | 13
-rw-r--r-- arch/xtensa/include/uapi/asm/auxvec.h | 4
-rw-r--r-- arch/xtensa/include/uapi/asm/byteorder.h | 13
-rw-r--r-- arch/xtensa/include/uapi/asm/ioctls.h | 128
-rw-r--r-- arch/xtensa/include/uapi/asm/ipcbuf.h | 38
-rw-r--r-- arch/xtensa/include/uapi/asm/mman.h | 120
-rw-r--r-- arch/xtensa/include/uapi/asm/msgbuf.h | 48
-rw-r--r-- arch/xtensa/include/uapi/asm/param.h | 31
-rw-r--r-- arch/xtensa/include/uapi/asm/poll.h | 21
-rw-r--r-- arch/xtensa/include/uapi/asm/posix_types.h | 40
-rw-r--r-- arch/xtensa/include/uapi/asm/ptrace.h | 40
-rw-r--r-- arch/xtensa/include/uapi/asm/sembuf.h | 44
-rw-r--r-- arch/xtensa/include/uapi/asm/setup.h | 19
-rw-r--r-- arch/xtensa/include/uapi/asm/shmbuf.h | 51
-rw-r--r-- arch/xtensa/include/uapi/asm/sigcontext.h | 29
-rw-r--r-- arch/xtensa/include/uapi/asm/signal.h | 134
-rw-r--r-- arch/xtensa/include/uapi/asm/socket.h | 122
-rw-r--r-- arch/xtensa/include/uapi/asm/sockios.h | 32
-rw-r--r-- arch/xtensa/include/uapi/asm/stat.h | 60
-rw-r--r-- arch/xtensa/include/uapi/asm/swab.h | 71
-rw-r--r-- arch/xtensa/include/uapi/asm/termbits.h | 221
-rw-r--r-- arch/xtensa/include/uapi/asm/types.h | 30
-rw-r--r-- arch/xtensa/include/uapi/asm/unistd.h | 803
-rw-r--r-- arch/xtensa/kernel/.gitignore | 1
-rw-r--r-- arch/xtensa/kernel/Makefile | 42
-rw-r--r-- arch/xtensa/kernel/align.S | 485
-rw-r--r-- arch/xtensa/kernel/asm-offsets.c | 149
-rw-r--r-- arch/xtensa/kernel/coprocessor.S | 330
-rw-r--r-- arch/xtensa/kernel/entry.S | 2047
-rw-r--r-- arch/xtensa/kernel/head.S | 379
-rw-r--r-- arch/xtensa/kernel/hw_breakpoint.c | 307
-rw-r--r-- arch/xtensa/kernel/irq.c | 196
-rw-r--r-- arch/xtensa/kernel/mcount.S | 50
-rw-r--r-- arch/xtensa/kernel/module.c | 189
-rw-r--r-- arch/xtensa/kernel/mxhead.S | 62
-rw-r--r-- arch/xtensa/kernel/pci-dma.c | 212
-rw-r--r-- arch/xtensa/kernel/pci.c | 216
-rw-r--r-- arch/xtensa/kernel/perf_event.c | 446
-rw-r--r-- arch/xtensa/kernel/platform.c | 46
-rw-r--r-- arch/xtensa/kernel/process.c | 373
-rw-r--r-- arch/xtensa/kernel/ptrace.c | 517
-rw-r--r-- arch/xtensa/kernel/s32c1i_selftest.c | 128
-rw-r--r-- arch/xtensa/kernel/setup.c | 740
-rw-r--r-- arch/xtensa/kernel/signal.c | 496
-rw-r--r-- arch/xtensa/kernel/smp.c | 624
-rw-r--r-- arch/xtensa/kernel/stacktrace.c | 270
-rw-r--r-- arch/xtensa/kernel/syscall.c | 98
-rw-r--r-- arch/xtensa/kernel/time.c | 218
-rw-r--r-- arch/xtensa/kernel/traps.c | 546
-rw-r--r-- arch/xtensa/kernel/vectors.S | 791
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 336
-rw-r--r-- arch/xtensa/kernel/xtensa_ksyms.c | 132
-rw-r--r-- arch/xtensa/lib/Makefile | 7
-rw-r--r-- arch/xtensa/lib/checksum.S | 398
-rw-r--r-- arch/xtensa/lib/memcopy.S | 553
-rw-r--r-- arch/xtensa/lib/memset.S | 153
-rw-r--r-- arch/xtensa/lib/pci-auto.c | 320
-rw-r--r-- arch/xtensa/lib/strncpy_user.S | 217
-rw-r--r-- arch/xtensa/lib/strnlen_user.S | 141
-rw-r--r-- arch/xtensa/lib/usercopy.S | 284
-rw-r--r-- arch/xtensa/mm/Makefile | 12
-rw-r--r-- arch/xtensa/mm/cache.c | 328
-rw-r--r-- arch/xtensa/mm/fault.c | 255
-rw-r--r-- arch/xtensa/mm/highmem.c | 95
-rw-r--r-- arch/xtensa/mm/init.c | 269
-rw-r--r-- arch/xtensa/mm/ioremap.c | 68
-rw-r--r-- arch/xtensa/mm/kasan_init.c | 95
-rw-r--r-- arch/xtensa/mm/misc.S | 484
-rw-r--r-- arch/xtensa/mm/mmu.c | 114
-rw-r--r-- arch/xtensa/mm/tlb.c | 276
-rw-r--r-- arch/xtensa/oprofile/Makefile | 10
-rw-r--r-- arch/xtensa/oprofile/backtrace.c | 27
-rw-r--r-- arch/xtensa/oprofile/init.c | 26
-rw-r--r-- arch/xtensa/platforms/iss/Makefile | 10
-rw-r--r-- arch/xtensa/platforms/iss/console.c | 268
-rw-r--r-- arch/xtensa/platforms/iss/include/platform/serial.h | 15
-rw-r--r-- arch/xtensa/platforms/iss/include/platform/simcall.h | 142
-rw-r--r-- arch/xtensa/platforms/iss/network.c | 683
-rw-r--r-- arch/xtensa/platforms/iss/setup.c | 106
-rw-r--r-- arch/xtensa/platforms/iss/simdisk.c | 372
-rw-r--r-- arch/xtensa/platforms/xt2000/Makefile | 5
-rw-r--r-- arch/xtensa/platforms/xt2000/include/platform/hardware.h | 43
-rw-r--r-- arch/xtensa/platforms/xt2000/include/platform/serial.h | 28
-rw-r--r-- arch/xtensa/platforms/xt2000/setup.c | 155
-rw-r--r-- arch/xtensa/platforms/xtfpga/Makefile | 10
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 60
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/lcd.h | 35
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/serial.h | 18
-rw-r--r-- arch/xtensa/platforms/xtfpga/lcd.c | 89
-rw-r--r-- arch/xtensa/platforms/xtfpga/setup.c | 310
-rw-r--r-- arch/xtensa/variants/csp/include/variant/core.h | 575
-rw-r--r-- arch/xtensa/variants/csp/include/variant/tie-asm.h | 194
-rw-r--r-- arch/xtensa/variants/csp/include/variant/tie.h | 161
-rw-r--r-- arch/xtensa/variants/dc232b/include/variant/core.h | 424
-rw-r--r-- arch/xtensa/variants/dc232b/include/variant/tie-asm.h | 122
-rw-r--r-- arch/xtensa/variants/dc232b/include/variant/tie.h | 131
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/core.h | 475
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/tie-asm.h | 193
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/tie.h | 150
-rw-r--r-- arch/xtensa/variants/de212/include/variant/core.h | 594
-rw-r--r-- arch/xtensa/variants/de212/include/variant/tie-asm.h | 170
-rw-r--r-- arch/xtensa/variants/de212/include/variant/tie.h | 136
-rw-r--r-- arch/xtensa/variants/fsf/include/variant/core.h | 359
-rw-r--r-- arch/xtensa/variants/fsf/include/variant/tie-asm.h | 70
-rw-r--r-- arch/xtensa/variants/fsf/include/variant/tie.h | 72
-rw-r--r-- arch/xtensa/variants/test_kc705_be/include/variant/core.h | 575
-rw-r--r-- arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h | 308
-rw-r--r-- arch/xtensa/variants/test_kc705_be/include/variant/tie.h | 182
-rw-r--r-- arch/xtensa/variants/test_kc705_hifi/include/variant/core.h | 531
-rw-r--r-- arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h | 328
-rw-r--r-- arch/xtensa/variants/test_kc705_hifi/include/variant/tie.h | 189
-rw-r--r-- arch/xtensa/variants/test_mmuhifi_c3/include/variant/core.h | 383
-rw-r--r-- arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h | 182
-rw-r--r-- arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie.h | 140
221 files changed, 35029 insertions, 0 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
new file mode 100644
index 000000000..d9b0709d6
--- /dev/null
+++ b/arch/xtensa/Kconfig
@@ -0,0 +1,586 @@
+# SPDX-License-Identifier: GPL-2.0
+config ZONE_DMA
+ def_bool y
+
+config XTENSA
+ def_bool y
+ select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select COMMON_CLK
+ select DMA_NONCOHERENT_OPS
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
+ select GENERIC_SCHED_CLOCK
+ select GENERIC_STRNCPY_FROM_USER if KASAN
+ select HAVE_ARCH_KASAN if MMU
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EXIT_THREAD
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_MEMBLOCK
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select HAVE_STACKPROTECTOR
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select PERF_USE_VMALLOC
+ select VIRT_TO_BUS
+ help
+ Xtensa processors are 32-bit RISC machines designed by Tensilica
+ primarily for embedded systems. These processors are both
+ configurable and extensible. The Linux port to the Xtensa
+ architecture supports all processor configurations and extensions,
+ with reasonable minimum requirements. The Xtensa Linux project has
+ a home page at <http://www.linux-xtensa.org/>.
+
+config RWSEM_XCHGADD_ALGORITHM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config ARCH_HAS_ILOG2_U32
+ def_bool n
+
+config ARCH_HAS_ILOG2_U64
+ def_bool n
+
+config NO_IOPORT_MAP
+ def_bool n
+
+config HZ
+ int
+ default 100
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+config MMU
+ def_bool n
+
+config HAVE_XTENSA_GPIO32
+ def_bool n
+
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0x6e400000
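
For orientation: KASAN derives a shadow byte address by shifting the checked address right by 3 and adding this offset, one shadow byte per 8 bytes of address space. A minimal sketch of that arithmetic, assuming the generic scale shift; the authoritative definitions live in arch/xtensa/include/asm/kasan.h:

/* Sketch of the KASAN shadow mapping implied by the offset above. */
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3            /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET      0x6e400000UL /* the Kconfig default */

static inline uintptr_t kasan_mem_to_shadow(uintptr_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}
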
+
+menu "Processor type and features"
+
+choice
+ prompt "Xtensa Processor Configuration"
+ default XTENSA_VARIANT_FSF
+
+config XTENSA_VARIANT_FSF
+ bool "fsf - default (not generic) configuration"
+ select MMU
+
+config XTENSA_VARIANT_DC232B
+ bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
+ select MMU
+ select HAVE_XTENSA_GPIO32
+ help
+ This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
+
+config XTENSA_VARIANT_DC233C
+ bool "dc233c - Diamond 233L Standard Core Rev.C (LE)"
+ select MMU
+ select HAVE_XTENSA_GPIO32
+ help
+ This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
+
+config XTENSA_VARIANT_CUSTOM
+ bool "Custom Xtensa processor configuration"
+ select HAVE_XTENSA_GPIO32
+ help
+ Select this variant to use a custom Xtensa processor configuration.
+ You will be prompted for a processor variant CORENAME.
+endchoice
+
+config XTENSA_VARIANT_CUSTOM_NAME
+ string "Xtensa Processor Custom Core Variant Name"
+ depends on XTENSA_VARIANT_CUSTOM
+ help
+ Provide the name of a custom Xtensa processor variant.
+ This CORENAME selects arch/xtensa/variants/CORENAME.
+ Don't forget to select MMU if your core has one.
+
+config XTENSA_VARIANT_NAME
+ string
+ default "dc232b" if XTENSA_VARIANT_DC232B
+ default "dc233c" if XTENSA_VARIANT_DC233C
+ default "fsf" if XTENSA_VARIANT_FSF
+ default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM
+
+config XTENSA_VARIANT_MMU
+ bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)"
+ depends on XTENSA_VARIANT_CUSTOM
+ default y
+ select MMU
+ help
+ Build a conventional kernel with full MMU support,
+ i.e. one that supports a TLB with auto-loading and page protection.
+
+config XTENSA_VARIANT_HAVE_PERF_EVENTS
+ bool "Core variant has Performance Monitor Module"
+ depends on XTENSA_VARIANT_CUSTOM
+ default n
+ help
+ Enable this if the core variant has a Performance Monitor Module
+ with an External Registers Interface.
+
+ If unsure, say N.
+
+config XTENSA_FAKE_NMI
+ bool "Treat PMM IRQ as NMI"
+ depends on XTENSA_VARIANT_HAVE_PERF_EVENTS
+ default n
+ help
+ If the PMM IRQ is the only IRQ at EXCM level, it is safe to
+ treat it as NMI, which improves the accuracy of profiling.
+
+ If there are other interrupts at or above the PMM IRQ priority level
+ but not above the EXCM level, the PMM IRQ may still be treated as NMI,
+ but only if those IRQs are not used. There will be a build warning
+ saying that this is not safe, and a bugcheck if one of these IRQs
+ actually fires.
+
+ If unsure, say N.
+
+config XTENSA_UNALIGNED_USER
+ bool "Unaligned memory access in use space"
+ help
+ The Xtensa architecture currently does not handle unaligned
+ memory accesses in hardware; they go through an exception handler
+ instead. By default, unaligned memory accesses are disabled in user space.
+
+ Say Y here to enable unaligned memory access in user space.
+
+config HAVE_SMP
+ bool "System Supports SMP (MX)"
+ depends on XTENSA_VARIANT_CUSTOM
+ select XTENSA_MX
+ help
+ This option is used to indicate that the system-on-a-chip (SOC)
+ supports multiprocessing. Multiprocessor support is implemented above
+ the CPU core definition and currently needs to be selected manually.
+
+ Multiprocessor support is implemented with external cache and
+ interrupt controllers.
+
+ The MX interrupt distributor adds interprocessor interrupts
+ and causes the IRQ numbers to be increased by 4 for devices
+ like the OpenCores Ethernet driver and the serial interface.
+
+ You still have to select "Enable SMP" to enable SMP on this SOC.
+
+config SMP
+ bool "Enable Symmetric multi-processing support"
+ depends on HAVE_SMP
+ select GENERIC_SMP_IDLE_THREAD
+ help
+ Enables SMP software, allowing more than one CPU/core
+ to be activated during startup.
+
+config NR_CPUS
+ depends on SMP
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ default "4"
+
+config HOTPLUG_CPU
+ bool "Enable CPU hotplug support"
+ depends on SMP
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+
+ Say N if you want to disable CPU hotplug.
+
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ bool "Initialize Xtensa MMU inside the Linux kernel code"
+ depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
+ default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
+ help
+ Earlier versions initialized the MMU in the exception vector
+ before jumping to _startup in head.S. This had the advantage that
+ it was possible to place a software breakpoint at 'reset' and
+ then enter your normal kernel breakpoints once the MMU was mapped
+ to the kernel mappings (0xC0000000).
+
+ This unfortunately won't work for U-Boot and likely also won't
+ work for using KEXEC to have a hot kernel ready for doing a
+ KDUMP.
+
+ So now the MMU is initialized in head.S, but it is necessary to
+ use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+ xt-gdb can't place a software breakpoint in the 0xD region prior
+ to mapping the MMU, and after mapping, even if the area of low
+ memory was mapped, gdb wouldn't remove the breakpoint on hitting
+ it, as the PC wouldn't match. Since hardware breakpoints are
+ recommended for Linux configurations, it seems reasonable to just
+ assume they exist and leave this older mechanism for unfortunate
+ souls who choose not to follow Tensilica's recommendation.
+
+ Selecting this will cause U-Boot to set the kernel load and entry
+ address at 0x00003000 instead of the standard mapped 0xD0003000.
+
+ If in doubt, say Y.
+
+config MEMMAP_CACHEATTR
+ hex "Cache attributes for the memory address space"
+ depends on !MMU
+ default 0x22222222
+ help
+ These cache attributes are set up for noMMU systems. Each hex digit
+ specifies cache attributes for the corresponding 512MB memory
+ region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
+ bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
+
+ Cache attribute values are specific to the MMU type, so e.g.
+ for region protection MMUs: 2 is cache bypass, 4 is WB cached,
+ 1 is WT cached, f is illegal. For the full MMU: bit 0 makes it
+ executable, bit 1 makes it writable, and bits 2..3 select the cache
+ mode: 0 is cache bypass, 1 is WB cache, 2 is WT cache, 3 is special
+ (c and e are illegal, f is reserved).
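
As a worked illustration of the encoding just described (a sketch, not kernel code): the 512MB region index for an address is addr >> 29, and the attribute is the corresponding nibble of this value.

/* Sketch: extract the cache-attribute nibble for an address from a
 * MEMMAP_CACHEATTR-style word; each hex digit covers one 512MB region. */
#include <stdio.h>
#include <stdint.h>

static unsigned int cacheattr_for(uint32_t cacheattr, uint32_t addr)
{
	unsigned int region = addr >> 29;	/* 0..7 */

	return (cacheattr >> (region * 4)) & 0xf;
}

int main(void)
{
	/* Default 0x22222222: every region is attribute 2, i.e. cache
	 * bypass on a region-protection MMU. */
	printf("attr(0x20000000) = %x\n", cacheattr_for(0x22222222, 0x20000000));
	return 0;
}
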
+
+config KSEG_PADDR
+ hex "Physical address of the KSEG mapping"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
+ default 0x00000000
+ help
+ This is the physical address where KSEG is mapped. Please refer to
+ the chosen KSEG layout help for the required address alignment.
+ The unpacked kernel image (including vectors) must be located
+ completely within KSEG.
+ Physical memory below this address is not available to Linux.
+
+ If unsure, leave the default value here.
+
+config KERNEL_LOAD_ADDRESS
+ hex "Kernel load address"
+ default 0x60003000 if !MMU
+ default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ This is the address where the kernel is loaded.
+ It is a virtual address for MMUv2 configurations and a physical
+ address for all other configurations.
+
+ If unsure, leave the default value here.
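
The interplay of these defaults with the MMUv2 KSEG window (described under "KSEG layout" below) can be made concrete with a small sketch; the constants are the ones quoted in this file, and the helper is illustrative only:

/* Sketch: MMUv2 maps physical KSEG_PADDR..+128MB to 0xd0000000 (cached)
 * and 0xd8000000 (bypass), so physical 0x00003000 is virtual 0xd0003000. */
#include <stdint.h>

#define KSEG_PADDR        0x00000000u  /* the default above */
#define KSEG_CACHED_VADDR 0xd0000000u

static inline uint32_t kseg_phys_to_virt(uint32_t paddr)
{
	return (paddr - KSEG_PADDR) + KSEG_CACHED_VADDR;
}
/* kseg_phys_to_virt(0x00003000) == 0xd0003000, the classic MMUv2 load
 * address used when the MMU is not initialized inside vmlinux. */
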
+
+config VECTORS_OFFSET
+ hex "Kernel vectors offset"
+ default 0x00003000
+ help
+ This is the offset of the kernel image from the relocatable vectors
+ base.
+
+ If unsure, leave the default value here.
+
+choice
+ prompt "KSEG layout"
+ depends on MMU
+ default XTENSA_KSEG_MMU_V2
+
+config XTENSA_KSEG_MMU_V2
+ bool "MMUv2: 128MB cached + 128MB uncached"
+ help
+ MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
+ at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
+ without cache.
+ KSEG_PADDR must be aligned to 128MB.
+
+config XTENSA_KSEG_256M
+ bool "256MB cached + 256MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+config XTENSA_KSEG_512M
+ bool "512MB cached + 512MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+endchoice
+
+config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU
+ help
+ Linux can use the full amount of RAM in the system by
+ default. However, the default MMUv2 setup only maps the
+ lowermost 128 MB of memory linearly to the areas starting
+ at 0xd0000000 (cached) and 0xd8000000 (uncached).
+ When there is more than 128 MB of memory in the system, not
+ all of it can be "permanently mapped" by the kernel.
+ The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a
+ machine with more than 128 MB total physical RAM, answer
+ N here.
+
+ If unsure, say Y.
+
+config FAST_SYSCALL_XTENSA
+ bool "Enable fast atomic syscalls"
+ default n
+ help
+ fast_syscall_xtensa is a syscall that can perform atomic operations
+ on UP kernels when the processor has no s32c1i support.
+
+ This syscall is deprecated. It may have issues when called with
+ invalid arguments. It is provided only for backwards compatibility.
+ Only enable it if your userspace software requires it.
+
+ If unsure, say N.
+
+config FAST_SYSCALL_SPILL_REGISTERS
+ bool "Enable spill registers syscall"
+ default n
+ help
+ fast_syscall_spill_registers is a syscall that spills all active
+ register windows of a calling userspace task onto its stack.
+
+ This syscall is deprecated. It may have issues when called with
+ invalid arguments. It is provided only for backwards compatibility.
+ Only enable it if your userspace software requires it.
+
+ If unsure, say N.
+
+endmenu
+
+config XTENSA_CALIBRATE_CCOUNT
+ def_bool n
+ help
+ On some platforms (XT2000, for example), the CPU clock rate can
+ vary. The frequency can be determined, however, by measuring
+ against a well-known, fixed frequency, such as a UART oscillator.
+
+config SERIAL_CONSOLE
+ def_bool n
+
+menu "Bus options"
+
+config PCI
+ bool "PCI support"
+ default y
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+source "drivers/pci/Kconfig"
+
+endmenu
+
+menu "Platform options"
+
+choice
+ prompt "Xtensa System Type"
+ default XTENSA_PLATFORM_ISS
+
+config XTENSA_PLATFORM_ISS
+ bool "ISS"
+ select XTENSA_CALIBRATE_CCOUNT
+ select SERIAL_CONSOLE
+ help
+ ISS is an acronym for Tensilica's Instruction Set Simulator.
+
+config XTENSA_PLATFORM_XT2000
+ bool "XT2000"
+ select HAVE_IDE
+ help
+ XT2000 is the name of Tensilica's feature-rich emulation platform.
+ This hardware is capable of running a full Linux distribution.
+
+config XTENSA_PLATFORM_XTFPGA
+ bool "XTFPGA"
+ select ETHOC if ETHERNET
+ select PLATFORM_WANT_DEFAULT_MEM if !MMU
+ select SERIAL_CONSOLE
+ select XTENSA_CALIBRATE_CCOUNT
+ help
+ XTFPGA is the name of the Tensilica board family (LX60, LX110, LX200, ML605).
+ This hardware is capable of running a full Linux distribution.
+
+endchoice
+
+config PLATFORM_NR_IRQS
+ int
+ default 3 if XTENSA_PLATFORM_XT2000
+ default 0
+
+config XTENSA_CPU_CLOCK
+ int "CPU clock rate [MHz]"
+ depends on !XTENSA_CALIBRATE_CCOUNT
+ default 16
+
+config GENERIC_CALIBRATE_DELAY
+ bool "Auto calibration of the BogoMIPS value"
+ help
+ The BogoMIPS value can easily be derived from the CPU frequency.
+
+config CMDLINE_BOOL
+ bool "Default bootloader kernel arguments"
+
+config CMDLINE
+ string "Initial kernel command string"
+ depends on CMDLINE_BOOL
+ default "console=ttyS0,38400 root=/dev/ram"
+ help
+ On some architectures (EBSA110 and CATS), there is currently no way
+ for the boot loader to pass arguments to the kernel. For these
+ architectures, you should supply some command-line options at build
+ time by entering them here. As a minimum, you should specify the
+ memory size and the root device (e.g., mem=64M root=/dev/nfs).
+
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
+ help
+ Include support for flattened device tree machine descriptions.
+
+config BUILTIN_DTB
+ string "DTB to build into the kernel image"
+ depends on OF
+
+config PARSE_BOOTPARAM
+ bool "Parse bootparam block"
+ default y
+ help
+ Parse parameters passed to the kernel from the bootloader. It may
+ be disabled if the kernel is known to run without the bootloader.
+
+ If unsure, say Y.
+
+config BLK_DEV_SIMDISK
+ tristate "Host file-based simulated block device support"
+ default n
+ depends on XTENSA_PLATFORM_ISS && BLOCK
+ help
+ Create block devices that map to files in the host file system.
+ A device's binding to its host file may be changed at runtime via the
+ proc interface, provided the device is not in use.
+
+config BLK_DEV_SIMDISK_COUNT
+ int "Number of host file-based simulated block devices"
+ range 1 10
+ depends on BLK_DEV_SIMDISK
+ default 2
+ help
+ This is the default minimal number of created block devices.
+ The kernel/module parameter 'simdisk_count' may be used to change this
+ value at runtime. More file names (but no more than 10) may be
+ specified as parameters; simdisk_count grows accordingly.
+
+config SIMDISK0_FILENAME
+ string "Host filename for the first simulated device"
+ depends on BLK_DEV_SIMDISK = y
+ default ""
+ help
+ Attach the first simdisk to a host file. Conventionally, this file
+ contains a root file system.
+
+config SIMDISK1_FILENAME
+ string "Host filename for the second simulated device"
+ depends on BLK_DEV_SIMDISK = y && BLK_DEV_SIMDISK_COUNT != 1
+ default ""
+ help
+ Another simulated disk in a host file, for buildroot-independent
+ storage.
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
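
A worked instance of that arithmetic (the page size is an assumption here; Xtensa configurations vary):

/* Sketch: with FORCE_MAX_ZONEORDER = 11, the largest buddy block is
 * order 10, i.e. 2^10 pages; at an assumed 4 KiB page size that is 4 MiB. */
#include <stdio.h>

int main(void)
{
	unsigned int max_zoneorder = 11;   /* the Kconfig default */
	unsigned long page_size = 4096;    /* assumed page size */
	unsigned long pages = 1UL << (max_zoneorder - 1);

	printf("largest block: %lu pages = %lu bytes\n",
	       pages, pages * page_size);
	return 0;
}
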
+
+source "drivers/pcmcia/Kconfig"
+
+config PLATFORM_WANT_DEFAULT_MEM
+ def_bool n
+
+config DEFAULT_MEM_START
+ hex
+ prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
+ default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
+ default 0x00000000
+ help
+ This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
+ in noMMU configurations.
+
+ If unsure, leave the default value here.
+
+config XTFPGA_LCD
+ bool "Enable XTFPGA LCD driver"
+ depends on XTENSA_PLATFORM_XTFPGA
+ default n
+ help
+ There's a 2x16 LCD on most XTFPGA boards; the kernel may output
+ progress messages there during bootup/shutdown. It may be useful
+ during board bringup.
+
+ If unsure, say N.
+
+config XTFPGA_LCD_BASE_ADDR
+ hex "XTFPGA LCD base address"
+ depends on XTFPGA_LCD
+ default "0x0d0c0000"
+ help
+ Base address of the LCD controller inside the KIO region.
+ Different boards from the XTFPGA family have the LCD controller at
+ different addresses. Please consult the prototyping user guide for
+ your board for the correct address. A wrong address may lead to hardware lockup.
+
+config XTFPGA_LCD_8BIT_ACCESS
+ bool "Use 8-bit access to XTFPGA LCD"
+ depends on XTFPGA_LCD
+ default n
+ help
+ The LCD may be connected with a 4- or 8-bit interface; 8-bit access
+ may only be used with the 8-bit interface. Please consult the
+ prototyping user guide for your board for the correct interface width.
+
+endmenu
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+endmenu
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
new file mode 100644
index 000000000..39de98e20
--- /dev/null
+++ b/arch/xtensa/Kconfig.debug
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DEBUG_TLB_SANITY
+ bool "Debug TLB sanity"
+ depends on DEBUG_KERNEL && MMU
+ help
+ Enable this to turn on a TLB sanity check on each entry to userspace.
+ This check can spot missing TLB invalidation, wrong PTE permissions,
+ or premature page freeing.
+
+ If unsure, say N.
+
+config LD_NO_RELAX
+ bool "Disable linker relaxation"
+ default y
+ help
+ Enable this option to disable link-time optimizations (linker
+ relaxation). The default linker behavior is to combine identical
+ literal values to reduce code size and to remove unnecessary
+ overhead from assembler-generated 'longcall' sequences.
+ Enabling this option speeds up linking but increases code size,
+ and possibly execution time.
+
+config S32C1I_SELFTEST
+ bool "Perform S32C1I instruction self-test at boot"
+ default y
+ help
+ Enable this option to test S32C1I instruction behavior at boot.
+ Correct operation of this instruction requires some cooperation from
+ hardware external to the processor (such as a bus bridge, bus fabric, or memory controller).
+ It is easy to get the hardware configuration wrong; this test should catch such problems early.
+
+ Say 'N' on stable hardware.
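
For context on what the selftest exercises: S32C1I is Xtensa's atomic compare-and-swap instruction, and its correctness depends on the external memory system honoring the conditional-store semantics. A hedged C sketch of those semantics (this is not the actual test in arch/xtensa/kernel/s32c1i_selftest.c):

/* Sketch of compare-and-swap semantics equivalent to what S32C1I
 * provides: write 'new' to *p only if *p still holds 'expected',
 * and return the value that was observed. */
#include <stdatomic.h>
#include <stdint.h>

static uint32_t cas32(_Atomic uint32_t *p, uint32_t expected, uint32_t new)
{
	atomic_compare_exchange_strong(p, &expected, new);
	return expected;  /* observed value; equals old 'expected' on success */
}
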
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
new file mode 100644
index 000000000..d67e30faf
--- /dev/null
+++ b/arch/xtensa/Makefile
@@ -0,0 +1,107 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001 - 2005 Tensilica Inc.
+# Copyright (C) 2014 Cadence Design Systems Inc.
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+
+# Core configuration.
+# (Use VAR=<xtensa_config> to use another default compiler.)
+
+variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME))
+
+VARIANT = $(variant-y)
+export VARIANT
+
+# Test for cross compiling
+
+ifneq ($(VARIANT),)
+ COMPILE_ARCH = $(shell uname -m)
+
+ ifneq ($(COMPILE_ARCH), xtensa)
+ ifndef CROSS_COMPILE
+ CROSS_COMPILE = xtensa_$(VARIANT)-
+ endif
+ endif
+endif
+
+# Platform configuration
+
+platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
+platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
+platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
+
+PLATFORM = $(platform-y)
+export PLATFORM
+
+# temporarily until string.h is fixed
+KBUILD_CFLAGS += -ffreestanding -D__linux__
+KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
+KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+
+KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
+
+ifneq ($(CONFIG_LD_NO_RELAX),)
+KBUILD_LDFLAGS := --no-relax
+endif
+
+ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EB__
+KBUILD_CPPFLAGS += -DCONFIG_CPU_BIG_ENDIAN
+endif
+ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EL__
+KBUILD_CPPFLAGS += -DCONFIG_CPU_LITTLE_ENDIAN
+endif
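
Both probes above rely on the Xtensa compiler predefining exactly one of __XTENSA_EB__ or __XTENSA_EL__ to 1, so preprocessing the bare token yields "1" only on a matching toolchain. The same detection done in C, as an illustrative sketch:

/* Sketch of the endianness detection the Makefile performs with
 * `echo __XTENSA_EB__ | $(CC) -E -`: a big-endian Xtensa compiler
 * predefines __XTENSA_EB__, a little-endian one __XTENSA_EL__. */
#include <stdio.h>

int main(void)
{
#if defined(__XTENSA_EB__)
	printf("big-endian Xtensa target\n");
#elif defined(__XTENSA_EL__)
	printf("little-endian Xtensa target\n");
#else
	printf("not an Xtensa toolchain\n");
#endif
	return 0;
}
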
+
+vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
+plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
+
+KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
+
+KBUILD_DEFCONFIG := iss_defconfig
+
+# Only build variant and/or platform if it includes a Makefile
+
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+
+# Find libgcc.a
+
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+head-y := arch/xtensa/kernel/head.o
+core-y += arch/xtensa/kernel/ arch/xtensa/mm/
+core-y += $(buildvar) $(buildplf)
+
+libs-y += arch/xtensa/lib/ $(LIBGCC)
+drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
+
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+core-$(CONFIG_OF) += arch/xtensa/boot/dts/
+endif
+
+boot := arch/xtensa/boot
+
+all Image zImage uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+%.dtb:
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts
+
+define archhelp
+ @echo '* Image - Kernel ELF image with reset vector'
+ @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
+ @echo '* uImage - U-Boot wrapped image'
+ @echo ' dtbs - Build device tree blobs for enabled boards'
+endef
diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore
new file mode 100644
index 000000000..38177c7eb
--- /dev/null
+++ b/arch/xtensa/boot/.gitignore
@@ -0,0 +1,2 @@
+uImage
+zImage.redboot
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
new file mode 100644
index 000000000..294846117
--- /dev/null
+++ b/arch/xtensa/boot/Makefile
@@ -0,0 +1,52 @@
+#
+# arch/xtensa/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+
+# KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
+KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
+HOSTFLAGS += -Iarch/$(ARCH)/boot/include
+
+BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
+
+export ccflags-y
+export BIG_ENDIAN
+
+subdir-y := lib
+
+# Subdirs for the boot loader(s)
+
+boot-$(CONFIG_XTENSA_PLATFORM_ISS) += Image
+boot-$(CONFIG_XTENSA_PLATFORM_XT2000) += Image zImage uImage
+boot-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += Image zImage uImage
+
+all: $(boot-y)
+Image: boot-elf
+zImage: boot-redboot
+uImage: $(obj)/uImage
+
+boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
+ $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
+
+OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
+
+vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+vmlinux.bin.gz: vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+boot-elf: vmlinux.bin
+boot-redboot: vmlinux.bin.gz
+
+UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/uImage: vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+ $(Q)$(kecho) ' Kernel: $@ is ready'
diff --git a/arch/xtensa/boot/boot-elf/.gitignore b/arch/xtensa/boot/boot-elf/.gitignore
new file mode 100644
index 000000000..5ff8fbb85
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/.gitignore
@@ -0,0 +1 @@
+boot.lds
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
new file mode 100644
index 000000000..12ae1e91c
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -0,0 +1,34 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+ifeq ($(BIG_ENDIAN),1)
+OBJCOPY_ARGS := -O elf32-xtensa-be
+else
+OBJCOPY_ARGS := -O elf32-xtensa-le
+endif
+
+export OBJCOPY_ARGS
+export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
+
+boot-y := bootstrap.o
+
+OBJS := $(addprefix $(obj)/,$(boot-y))
+
+$(obj)/Image.o: vmlinux.bin $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin \
+ --set-section-flags image=contents,alloc,load,data \
+ $(OBJS) $@
+
+$(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
+ $(Q)$(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) \
+ -T $(obj)/boot.lds \
+ --build-id=none \
+ -o $@ $(obj)/Image.o
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+all Image: $(obj)/../Image.elf
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
new file mode 100644
index 000000000..a30993054
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ * Copyright (C) 2008 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/vectors.h>
+OUTPUT_ARCH(xtensa)
+ENTRY(_ResetVector)
+
+SECTIONS
+{
+ .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ {
+ *(.ResetVector.text)
+ }
+
+ .image KERNELOFFSET: AT (CONFIG_KERNEL_LOAD_ADDRESS)
+ {
+ _image_start = .;
+ *(image)
+ . = (. + 3) & ~ 3;
+ _image_end = . ;
+ }
+
+ .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
+ {
+ __bss_start = .;
+ *(.sbss)
+ *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ __bss_end = .;
+ }
+}
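
The expression `. = (. + 3) & ~ 3;` above (also used in the RedBoot script later) rounds the location counter up to the next 4-byte boundary; the same idiom in C, for illustration:

/* Sketch of the round-up idiom used by the linker scripts:
 * (x + 3) & ~3 rounds x up to the next multiple of 4. */
#include <stdint.h>

static inline uintptr_t align4_up(uintptr_t x)
{
	return (x + 3) & ~(uintptr_t)3;
}
/* align4_up(0xd0003001) == 0xd0003004; aligned values are unchanged. */
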
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
new file mode 100644
index 000000000..29c68426a
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -0,0 +1,78 @@
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Piet Delaney <piet@tensilica.com>
+ */
+
+#include <asm/bootparam.h>
+#include <asm/initialize_mmu.h>
+#include <asm/vectors.h>
+#include <linux/linkage.h>
+
+ .section .ResetVector.text, "ax"
+ .global _ResetVector
+ .global reset
+
+_ResetVector:
+ _j _SetupMMU
+
+ .begin no-absolute-literals
+ .literal_position
+
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ .literal RomInitAddr, CONFIG_KERNEL_LOAD_ADDRESS
+#else
+ .literal RomInitAddr, KERNELOFFSET
+#endif
+#ifndef CONFIG_PARSE_BOOTPARAM
+ .literal RomBootParam, 0
+#else
+ .literal RomBootParam, _bootparam
+
+ .align 4
+_bootparam:
+ .short BP_TAG_FIRST
+ .short 4
+ .long BP_VERSION
+ .short BP_TAG_LAST
+ .short 0
+ .long 0
+#endif
+
+ .align 4
+_SetupMMU:
+ movi a0, 0
+ wsr a0, windowbase
+ rsync
+ movi a0, 1
+ wsr a0, windowstart
+ rsync
+ movi a0, 0x1F
+ wsr a0, ps
+ rsync
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
+
+ rsil a0, XCHAL_DEBUGLEVEL-1
+ rsync
+reset:
+ l32r a0, RomInitAddr
+ l32r a2, RomBootParam
+ movi a3, 0
+ movi a4, 0
+ jx a0
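
The _bootparam block in this file hand-assembles the minimal boot-parameter list that the kernel's bootparam parser expects: a BP_TAG_FIRST record carrying BP_VERSION, terminated by a BP_TAG_LAST record. An illustrative C view of that layout (the struct name is an assumption; the authoritative definitions are in arch/xtensa/include/asm/bootparam.h):

/* Sketch of one boot-parameter record as laid out by _bootparam above:
 * a 16-bit tag, a 16-bit payload size, then the payload itself. The
 * FIRST tag carries a 4-byte BP_VERSION payload; LAST has size 0. */
#include <stdint.h>

struct bp_tag_sketch {
	uint16_t id;    /* BP_TAG_FIRST, BP_TAG_LAST, ... (asm/bootparam.h) */
	uint16_t size;  /* payload size in bytes */
	/* payload bytes follow, 4-byte aligned */
};
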
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
new file mode 100644
index 000000000..8632473ad
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -0,0 +1,35 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+ifeq ($(BIG_ENDIAN),1)
+OBJCOPY_ARGS := -O elf32-xtensa-be
+else
+OBJCOPY_ARGS := -O elf32-xtensa-le
+endif
+
+LD_ARGS = -T $(srctree)/$(obj)/boot.ld
+
+boot-y := bootstrap.o
+
+OBJS := $(addprefix $(obj)/,$(boot-y))
+LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
+
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+$(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin.gz \
+ --set-section-flags image=contents,alloc,load,data \
+ $(OBJS) $@
+
+$(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
+ $(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
+
+$(obj)/../zImage.redboot: $(obj)/zImage.elf
+ $(Q)$(OBJCOPY) -S -O binary $< $@
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+all zImage: $(obj)/../zImage.redboot
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
new file mode 100644
index 000000000..b0b9e95b5
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -0,0 +1,59 @@
+OUTPUT_ARCH(xtensa)
+
+SECTIONS
+{
+ .start 0xD1000000 : { *(.start) }
+
+ .text :
+ {
+ __reloc_start = . ;
+ _text_start = . ;
+ *(.literal .text.literal .text)
+ _text_end = . ;
+ }
+
+ .rodata ALIGN(0x04):
+ {
+ *(.rodata)
+ *(.rodata1)
+ }
+
+ .data ALIGN(0x04):
+ {
+ *(.data)
+ *(.data1)
+ *(.sdata)
+ *(.sdata2)
+ *(.got.plt)
+ *(.got)
+ *(.dynamic)
+ }
+
+ __reloc_end = . ;
+
+ . = ALIGN(0x10);
+ __image_load = . ;
+ .image 0xd0003000: AT(__image_load)
+ {
+ _image_start = .;
+ *(image)
+ . = (. + 3) & ~ 3;
+ _image_end = . ;
+ }
+
+
+ .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
+ {
+ __bss_start = .;
+ *(.sbss)
+ *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ __bss_end = .;
+ }
+ _end = .;
+ _param_start = .;
+
+
+ PROVIDE (end = .);
+}
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
new file mode 100644
index 000000000..bbf3b4b08
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <variant/core.h>
+#include <asm/regs.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
+ /*
+ * RB-Data: RedBoot data/bss
+ * P: Boot-Parameters
+ * L: Kernel-Loader
+ *
+ * The Linux-Kernel image including the loader must be loaded
+ * to a position so that the kernel and the boot parameters
+ * can fit in the space before the load address.
+ * ______________________________________________________
+ * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______|
+ * ^
+ * ^ Load address
+ * ______________________________________________________
+ * |___Linux-Kernel___|_P_|_L_|___________________________|
+ *
+ * The loader copies the parameter to the position that will
+ * be the end of the kernel and itself to the end of the
+ * parameter list.
+ */
+
+/* Make sure we have enough space for the 'uncompressor' */
+
+#define STACK_SIZE 32768
+#define HEAP_SIZE (131072*4)
+
+ # a2: Parameter list
+ # a3: Size of parameter list
+
+ .section .start, "ax"
+
+ .globl __start
+ /* this must be the first byte of the loader! */
+__start:
+ entry sp, 32 # we do not intend to return
+ _call0 _start
+__start_a0:
+ .align 4
+
+ .section .text, "ax"
+ .literal_position
+ .begin literal_prefix .text
+
+ /* put literals in here! */
+
+ .globl _start
+_start:
+
+ /* 'reset' window registers */
+
+ movi a4, 1
+ wsr a4, ps
+ rsync
+
+ rsr a5, windowbase
+ ssl a5
+ sll a4, a4
+ wsr a4, windowstart
+ rsync
+
+ movi a4, 0x00040000
+ wsr a4, ps
+ rsync
+
+ /* copy the loader to its address
+ * Note: The loader itself is a very small piece, so we assume we
+ * don't partially overlap. We also assume (even more importantly)
+ * that the kernel image is out of the way. Usually, when the
+ * load address of this image is not arbitrary but aligned to
+ * some multiple of 10K, we shouldn't overlap.
+ */
+
+ /* Note: The assembler cannot relax "addi a0, a0, ..." to an
+ l32r, so we load to a4 first. */
+
+ # addi a4, a0, __start - __start_a0
+ # mov a0, a4
+
+ movi a4, __start
+ movi a5, __start_a0
+ add a4, a0, a4
+ sub a0, a4, a5
+
+ movi a4, __start
+ movi a5, __reloc_end
+
+ # a0: address where this code has been loaded
+ # a4: compiled address of __start
+ # a5: compiled end address
+
+ mov.n a7, a0
+ mov.n a8, a4
+
+1:
+ l32i a10, a7, 0
+ l32i a11, a7, 4
+ s32i a10, a8, 0
+ s32i a11, a8, 4
+ l32i a10, a7, 8
+ l32i a11, a7, 12
+ s32i a10, a8, 8
+ s32i a11, a8, 12
+ addi a8, a8, 16
+ addi a7, a7, 16
+ blt a8, a5, 1b
+
+
+ /* We have to flush and invalidate the caches here before we jump. */
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+ ___flush_dcache_all a5 a6
+
+#endif
+
+ ___invalidate_icache_all a5 a6
+ isync
+
+ movi a11, _reloc
+ jx a11
+
+ .globl _reloc
+_reloc:
+
+ /* RedBoot is now at the end of the memory, so we don't have
+ * to copy the parameter list. Keep the code around; in case
+ * we need it again. */
+#if 0
+ # a0: load address
+ # a2: start address of parameter list
+ # a3: length of parameter list
+ # a4: __start
+
+ /* copy the parameter list out of the way */
+
+ movi a6, _param_start
+ add a3, a2, a3
+2:
+ l32i a8, a2, 0
+ s32i a8, a6, 0
+ addi a2, a2, 4
+ addi a6, a6, 4
+ blt a2, a3, 2b
+#endif
+
+ /* clear BSS section */
+ movi a6, __bss_start
+ movi a7, __bss_end
+ movi.n a5, 0
+3:
+ s32i a5, a6, 0
+ addi a6, a6, 4
+ blt a6, a7, 3b
+
+ movi a5, -16
+ movi a1, _stack + STACK_SIZE
+ and a1, a1, a5
+
+ /* Uncompress the kernel */
+
+ # a0: load address
+ # a2: boot parameter
+ # a4: __start
+
+ movi a3, __image_load
+ sub a4, a3, a4
+ add a8, a0, a4
+
+ # a1 Stack
+ # a8(a4) Load address of the image
+
+ movi a6, _image_start
+ movi a10, _image_end
+ movi a7, 0x1000000
+ sub a11, a10, a6
+ movi a9, complen
+ s32i a11, a9, 0
+
+ movi a0, 0
+
+ # a6 destination
+ # a7 maximum size of destination
+ # a8 source
+ # a9 ptr to length
+
+ .extern gunzip
+ movi a4, gunzip
+ beqz a4, 1f
+
+ callx4 a4
+
+ j 2f
+
+
+ # a6 destination start
+ # a7 maximum size of destination
+ # a8 source start
+ # a9 ptr to length
+ # a10 destination end
+
+1:
+ l32i a9, a8, 0
+ l32i a11, a8, 4
+ s32i a9, a6, 0
+ s32i a11, a6, 4
+ l32i a9, a8, 8
+ l32i a11, a8, 12
+ s32i a9, a6, 8
+ s32i a11, a6, 12
+ addi a6, a6, 16
+ addi a8, a8, 16
+ blt a6, a10, 1b
+
+
+ /* jump to the kernel */
+2:
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+ ___flush_dcache_all a5 a6
+
+#endif
+
+ ___invalidate_icache_all a5 a6
+
+ isync
+
+ # a2 Boot parameter list
+
+ movi a0, _image_start
+ jx a0
+
+ .align 16
+ .data
+ .globl avail_ram
+avail_ram:
+ .long _heap
+ .globl end_avail
+end_avail:
+ .long _heap + HEAP_SIZE
+
+ .comm _stack, STACK_SIZE
+ .comm _heap, HEAP_SIZE
+
+ .globl end_avail
+ .comm complen, 4
+
+ .end literal_prefix
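
The relocation and fallback-copy loops above move 16 bytes per iteration with paired l32i/s32i; an equivalent C sketch, assuming (as the assembly itself does) word-aligned, non-overlapping regions:

/* C sketch of the 16-bytes-per-iteration copy loops in bootstrap.S;
 * assumes word-aligned, non-overlapping source and destination. */
#include <stdint.h>

static void copy16(uint32_t *dst, const uint32_t *src, const uint32_t *dst_end)
{
	while (dst < dst_end) {
		dst[0] = src[0];
		dst[1] = src[1];
		dst[2] = src[2];
		dst[3] = src[3];
		dst += 4;
		src += 4;
	}
}
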
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
new file mode 100644
index 000000000..f8052ba5a
--- /dev/null
+++ b/arch/xtensa/boot/dts/Makefile
@@ -0,0 +1,17 @@
+#
+# arch/xtensa/boot/dts/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+# for CONFIG_OF_ALL_DTBS test
+dtstree := $(srctree)/$(src)
+dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/xtensa/boot/dts/csp.dts b/arch/xtensa/boot/dts/csp.dts
new file mode 100644
index 000000000..885495460
--- /dev/null
+++ b/arch/xtensa/boot/dts/csp.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+/ {
+ compatible = "cdns,xtensa-xtfpga";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "earlycon=cdns,0xfd000000,115200 console=tty0 console=ttyPS0,115200 root=/dev/ram0 rw earlyprintk xilinx_uartps.rx_trigger_level=32 loglevel=8 nohz=off ignore_loglevel";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "cdns,xtensa-cpu";
+ reg = <0>;
+ };
+ };
+
+ pic: pic {
+ compatible = "cdns,xtensa-pic";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ clocks {
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ uart0: serial@0d000000 {
+ compatible = "xlnx,xuartps", "cdns,uart-r1p8";
+ clocks = <&osc>, <&osc>;
+ clock-names = "uart_clk", "pclk";
+ reg = <0x0d000000 0x1000>;
+ interrupts = <0 1>;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
new file mode 100644
index 000000000..6887ff090
--- /dev/null
+++ b/arch/xtensa/boot/dts/kc705.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-kc705";
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000";
+ };
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x38000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x04000000>;
+ alignment = <0x2000>;
+ alloc-ranges = <0x00000000 0x20000000>;
+ linux,cma-default;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/kc705_nommu.dts b/arch/xtensa/boot/dts/kc705_nommu.dts
new file mode 100644
index 000000000..d8e194a0f
--- /dev/null
+++ b/arch/xtensa/boot/dts/kc705_nommu.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-kc705";
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
+ };
+ memory@0 {
+ device_type = "memory";
+ reg = <0x60000000 0x10000000>;
+ };
+ soc {
+ ranges = <0x00000000 0x90000000 0x10000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644
index 000000000..974a8d904
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx200mx.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-lx200";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x06000000>;
+ };
+ pic: pic {
+ compatible = "cdns,xtensa-mx";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+};
diff --git a/arch/xtensa/boot/dts/lx60.dts b/arch/xtensa/boot/dts/lx60.dts
new file mode 100644
index 000000000..7c203c1c7
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx60.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-4m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-lx60";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/ml605.dts b/arch/xtensa/boot/dts/ml605.dts
new file mode 100644
index 000000000..08e5c8d47
--- /dev/null
+++ b/arch/xtensa/boot/dts/ml605.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-ml605";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
new file mode 100644
index 000000000..c33932568
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ soc {
+ flash: flash@00000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x00000000 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0 {
+ label = "data";
+ reg = <0x00000000 0x06000000>;
+ };
+ partition@6000000 {
+ label = "boot loader area";
+ reg = <0x06000000 0x00800000>;
+ };
+ partition@6800000 {
+ label = "kernel image";
+ reg = <0x06800000 0x017e0000>;
+ };
+ partition@7fe0000 {
+ label = "boot environment";
+ reg = <0x07fe0000 0x00020000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
new file mode 100644
index 000000000..7bde2ab2d
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x01000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@400000 {
+ label = "kernel image";
+ reg = <0x00400000 0x00600000>;
+ };
+ partition@a00000 {
+ label = "data";
+ reg = <0x00a00000 0x005e0000>;
+ };
+ partition@fe0000 {
+ label = "boot environment";
+ reg = <0x00fe0000 0x00020000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
new file mode 100644
index 000000000..0655b8687
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x00400000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x003f0000>;
+ };
+ partition@3f0000 {
+ label = "boot environment";
+ reg = <0x003f0000 0x00010000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
new file mode 100644
index 000000000..e46ae07ba
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/ {
+ compatible = "cdns,xtensa-xtfpga";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x06000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "cdns,xtensa-cpu";
+ reg = <0>;
+ clocks = <&osc>;
+ };
+ };
+
+ pic: pic {
+ compatible = "cdns,xtensa-pic";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ clocks {
+ clk54: clk54 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <54000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "cdns,xtfpga-clock";
+ reg = <0x0d020004 0x4>;
+ };
+
+ serial0: serial@0d050020 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ no-loopback-test;
+ reg = <0x0d050020 0x20>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ native-endian;
+ interrupts = <0 1>; /* external irq 0 */
+ clocks = <&osc>;
+ };
+
+ enet0: ethoc@0d030000 {
+ compatible = "opencores,ethoc";
+ reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+ native-endian;
+ interrupts = <1 1>; /* external irq 1 */
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
+
+ i2s0: xtfpga-i2s@0d080000 {
+ #sound-dai-cells = <0>;
+ compatible = "cdns,xtfpga-i2s";
+ reg = <0x0d080000 0x40>;
+ interrupts = <2 1>; /* external irq 2 */
+ clocks = <&cdce706 4>;
+ };
+
+ i2c0: i2c-master@0d090000 {
+ compatible = "opencores,i2c-ocores";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0d090000 0x20>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ native-endian;
+ interrupts = <4 1>;
+ clocks = <&osc>;
+
+ cdce706: clock-synth@69 {
+ compatible = "ti,cdce706";
+ #clock-cells = <1>;
+ reg = <0x69>;
+ clocks = <&clk54>;
+ clock-names = "clk_in0";
+ };
+ };
+
+ spi0: spi@0d0a0000 {
+ compatible = "cdns,xtfpga-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0d0a0000 0xc>;
+
+ tlv320aic23: sound-codec@0 {
+ #sound-dai-cells = <0>;
+ compatible = "tlv320aic23";
+ reg = <0>;
+ spi-max-frequency = <12500000>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&tlv320aic23>;
+ simple-audio-card,bitclock-master = <0>;
+ simple-audio-card,frame-master = <0>;
+ clocks = <&cdce706 4>;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/lib/.gitignore b/arch/xtensa/boot/lib/.gitignore
new file mode 100644
index 000000000..1629a6167
--- /dev/null
+++ b/arch/xtensa/boot/lib/.gitignore
@@ -0,0 +1,3 @@
+inffast.c
+inflate.c
+inftrees.c
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
new file mode 100644
index 000000000..355127faa
--- /dev/null
+++ b/arch/xtensa/boot/lib/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for some libs needed by zImage.
+#
+
+zlib := inffast.c inflate.c inftrees.c
+
+lib-y += $(zlib:.c=.o) zmem.o
+
+ccflags-y := -Ilib/zlib_inflate
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_inflate.o = -pg
+CFLAGS_REMOVE_zmem.o = -pg
+CFLAGS_REMOVE_inftrees.o = -pg
+CFLAGS_REMOVE_inffast.o = -pg
+endif
+
+KASAN_SANITIZE := n
+
+CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong
+
+quiet_cmd_copy_zlib = COPY $@
+ cmd_copy_zlib = cat $< > $@
+
+$(addprefix $(obj)/,$(zlib)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
+ $(call cmd,copy_zlib)
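+# For example, $(obj)/inflate.c is produced by copying
+# $(srctree)/lib/zlib_inflate/inflate.c; the generated copies are
+# removed again via clean-files below.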
+
+clean-files := $(zlib)
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
new file mode 100644
index 000000000..e3ecd743c
--- /dev/null
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/zlib.h>
+
+/* Bits borrowed from the PowerPC boot code. */
+
+extern void *avail_ram, *end_avail;
+
+void exit(void)
+{
+ for (;;);
+}
+
+void *zalloc(unsigned size)
+{
+ void *p = avail_ram;
+
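+ /* round the request up to a multiple of 8 bytes */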
+ size = (size + 7) & -8;
+ avail_ram += size;
+ if (avail_ram > end_avail) {
+ //puts("oops... out of memory\n");
+ //pause();
+ exit();
+ }
+ return p;
+}
+
+#define HEAD_CRC 2
+#define EXTRA_FIELD 4
+#define ORIG_NAME 8
+#define COMMENT 0x10
+#define RESERVED 0xe0
+
+#define DEFLATED 8
+
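+/*
+ * The gzip header assumed below: 10 fixed bytes (magic 0x1f 0x8b,
+ * method byte == DEFLATED, flag byte, mtime, xfl, os), optionally
+ * followed by the extra/name/comment/CRC16 fields selected by the
+ * flag bits above.
+ */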
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+{
+ z_stream s;
+ int r, i, flags;
+
+ /* skip header */
+ i = 10;
+ flags = src[3];
+ if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
+ //puts("bad gzipped data\n");
+ exit();
+ }
+ if ((flags & EXTRA_FIELD) != 0)
+ i = 12 + src[10] + (src[11] << 8);
+ if ((flags & ORIG_NAME) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & COMMENT) != 0)
+ while (src[i++] != 0)
+ ;
+ if ((flags & HEAD_CRC) != 0)
+ i += 2;
+ if (i >= *lenp) {
+ //puts("gunzip: ran out of data in header\n");
+ exit();
+ }
+
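+ /* single-shot inflate of the raw deflate stream; the negative
+  * window-bits argument tells zlib there is no zlib header */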
+ s.workspace = zalloc(zlib_inflate_workspacesize());
+ r = zlib_inflateInit2(&s, -MAX_WBITS);
+ if (r != Z_OK) {
+ //puts("inflateInit2 returned "); puthex(r); puts("\n");
+ exit();
+ }
+ s.next_in = src + i;
+ s.avail_in = *lenp - i;
+ s.next_out = dst;
+ s.avail_out = dstlen;
+ r = zlib_inflate(&s, Z_FINISH);
+ if (r != Z_OK && r != Z_STREAM_END) {
+ //puts("inflate returned "); puthex(r); puts("\n");
+ exit();
+ }
+ *lenp = s.next_out - (unsigned char *) dst;
+ zlib_inflateEnd(&s);
+}
+
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
new file mode 100644
index 000000000..2bf964df3
--- /dev/null
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -0,0 +1,141 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_kc705_hifi"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_OCORES=y
+CONFIG_SPI=y
+CONFIG_SPI_XTENSA_XTFPGA=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XTFPGA_I2S=y
+CONFIG_SND_SOC_TLV320AIC23_SPI=y
+CONFIG_SND_SIMPLE_CARD=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_COMMON_CLK_CDCE706=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
new file mode 100644
index 000000000..3221b7053
--- /dev/null
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -0,0 +1,121 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_USELIB=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="$$KERNEL_INITRAMFS_SOURCE"
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="csp"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="csp"
+# CONFIG_COMPACTION is not set
+CONFIG_XTFPGA_LCD=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_RCU_TRACE=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_S32C1I_SELFTEST is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
new file mode 100644
index 000000000..4bcc76b02
--- /dev/null
+++ b/arch/xtensa/configs/common_defconfig
@@ -0,0 +1,54 @@
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_XTENSA_PLATFORM_XT2000=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target memmap=128M@0"
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644
index 000000000..985fa8546
--- /dev/null
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -0,0 +1,129 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
new file mode 100644
index 000000000..4bb5b76d9
--- /dev/null
+++ b/arch/xtensa/configs/iss_defconfig
@@ -0,0 +1,34 @@
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_PCI is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target memmap=128M@0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
new file mode 100644
index 000000000..f3fc4f970
--- /dev/null
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -0,0 +1,130 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="de212"
+# CONFIG_XTENSA_VARIANT_MMU is not set
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_MEMMAP_CACHEATTR=0xfff2442f
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705_nommu"
+CONFIG_BINFMT_FLAT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_FRAME_POINTER is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_NOMMU_REGIONS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+# CONFIG_RCU_CPU_STALL_INFO is not set
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_LD_NO_RELAX is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644
index 000000000..b5938160f
--- /dev/null
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -0,0 +1,134 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+# CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="lx200mx"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
new file mode 100644
index 000000000..82c756431
--- /dev/null
+++ b/arch/xtensa/include/asm/Kbuild
@@ -0,0 +1,30 @@
+generic-y += bug.h
+generic-y += compat.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += extable.h
+generic-y += fb.h
+generic-y += hardirq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += rwsem.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
new file mode 100644
index 000000000..dfdf9fae1
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -0,0 +1,157 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic accessing functions to the user memory
+ * space for the kernel. This header provides the assembly-macro
+ * equivalents of the C accessors in asm/uaccess.h.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASM_UACCESS_H
+#define _XTENSA_ASM_UACCESS_H
+
+#include <linux/errno.h>
+#include <asm/types.h>
+
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+/*
+ * These assembly macros mirror the C macros in asm/uaccess.h. They
+ * should always have identical functionality. See
+ * arch/xtensa/kernel/sys.S for usage.
+ */
+
+#define KERNEL_DS 0
+#define USER_DS 1
+
+#define get_ds (KERNEL_DS)
+
+/*
+ * get_fs reads current->thread.current_ds into a register.
+ * On Entry:
+ * <ad> anything
+ * <sp> stack
+ * On Exit:
+ * <ad> contains current->thread.current_ds
+ */
+ .macro get_fs ad, sp
+ GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+ addi \ad, \ad, TASK_THREAD
+ l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
+ l32i \ad, \ad, THREAD_CURRENT_DS
+#endif
+ .endm
+
+/*
+ * set_fs sets current->thread.current_ds to some value.
+ * On Entry:
+ * <at> anything (temp register)
+ * <av> value to write
+ * <sp> stack
+ * On Exit:
+ * <at> destroyed (actually, current)
+ * <av> preserved, value to write
+ */
+ .macro set_fs at, av, sp
+ GET_CURRENT(\at,\sp)
+ s32i \av, \at, THREAD_CURRENT_DS
+ .endm
+
+/*
+ * kernel_ok determines whether we should bypass addr/size checking.
+ * See the equivalent C macro in asm/uaccess.h for clarity.
+ * On success, kernel_ok branches to a label indicated by parameter
+ * <success>. This implies that the macro falls through to the next
+ * instruction on an error.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on error).
+ *
+ * On Entry:
+ * <at> anything (temp register)
+ * <success> label to branch to on success; implies
+ * fall-through macro on error
+ * <sp> stack pointer
+ * On Exit:
+ * <at> destroyed (actually, current->thread.current_ds)
+ */
+
+#if ((KERNEL_DS != 0) || (USER_DS == 0))
+# error Assembly macro kernel_ok fails
+#endif
+ .macro kernel_ok at, sp, success
+ get_fs \at, \sp
+ beqz \at, \success
+ .endm
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C macro in asm/uaccess.h for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed (actually, (TASK_SIZE - size))
+ */
+ .macro user_ok aa, as, at, error
+ movi \at, __XTENSA_UL_CONST(TASK_SIZE)
+ bgeu \as, \at, \error
+ sub \at, \at, \as
+ bgeu \aa, \at, \error
+ .endm
+
+/*
+ * access_ok determines whether a memory access is allowed. See the
+ * equivalent C macro in asm/uaccess.h for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <sp>
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro access_ok aa, as, at, sp, error
+ kernel_ok \at, \sp, .Laccess_ok_\@
+ user_ok \aa, \as, \at, \error
+.Laccess_ok_\@:
+ .endm
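+
+/*
+ * Illustrative usage (a sketch, not taken from the kernel sources):
+ * check the user buffer whose address is in a2 and size in a3, with
+ * a4 as scratch and a1 as the stack pointer; .Lefault is a
+ * hypothetical error label:
+ *
+ *	access_ok a2, a3, a4, a1, .Lefault
+ */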
+
+#endif /* _XTENSA_ASM_UACCESS_H */
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
new file mode 100644
index 000000000..7f2ae5872
--- /dev/null
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,194 @@
+/*
+ * include/asm-xtensa/asmmacro.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASMMACRO_H
+#define _XTENSA_ASMMACRO_H
+
+#include <variant/core.h>
+
+/*
+ * Some little helpers for loops. Use zero-overhead-loops
+ * where applicable and if supported by the processor.
+ *
+ * __loopi ar, at, size, incr
+ * ar register initialized with the start address
+ * at scratch register used by macro
+ * size size immediate value
+ * incr increment
+ *
+ * __loops ar, as, at, incr_log2[, mask_log2][, cond][, ncond]
+ * ar register initialized with the start address
+ * as register initialized with the size
+ * at scratch register used by macro
+ * incr_log2 increment [in log2]
+ * mask_log2 mask [in log2]
+ * cond true condition (used in loop'cond')
+ * ncond false condition (used in b'ncond')
+ *
+ * __loop as
+ * restart loop. 'as' register must not have been modified!
+ *
+ * __endla ar, as, incr
+ * ar start address (modified)
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
+ * incr increment
+ */
+
+/*
+ * loop for given size as immediate
+ */
+
+ .macro __loopi ar, at, size, incr
+
+#if XCHAL_HAVE_LOOPS
+ movi \at, ((\size + \incr - 1) / (\incr))
+ loop \at, 99f
+#else
+ addi \at, \ar, \size
+ 98:
+#endif
+
+ .endm
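+
+/*
+ * Illustrative use (a sketch, not from the original file): store a4 to
+ * 64 bytes starting at a2, four bytes at a time, with a3 as scratch:
+ *
+ *	__loopi	a2, a3, 64, 4
+ *	s32i	a4, a2, 0
+ *	__endla	a2, a3, 4
+ */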
+
+/*
+ * loop for given size in register
+ */
+
+ .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
+
+#if XCHAL_HAVE_LOOPS
+ .ifgt \incr_log2 - 1
+ addi \at, \as, (1 << \incr_log2) - 1
+ .ifnc \mask_log2,
+ extui \at, \at, \incr_log2, \mask_log2
+ .else
+ srli \at, \at, \incr_log2
+ .endif
+ .endif
+ loop\cond \at, 99f
+#else
+ .ifnc \mask_log2,
+ extui \at, \as, \incr_log2, \mask_log2
+ .else
+ .ifnc \ncond,
+ srli \at, \as, \incr_log2
+ .endif
+ .endif
+ .ifnc \ncond,
+ b\ncond \at, 99f
+
+ .endif
+ .ifnc \mask_log2,
+ slli \at, \at, \incr_log2
+ add \at, \ar, \at
+ .else
+ add \at, \ar, \as
+ .endif
+#endif
+ 98:
+
+ .endm
+
+/*
+ * loop from ar to as
+ */
+
+ .macro __loopt ar, as, at, incr_log2
+
+#if XCHAL_HAVE_LOOPS
+ sub \at, \as, \ar
+ .ifgt \incr_log2 - 1
+ addi \at, \at, (1 << \incr_log2) - 1
+ srli \at, \at, \incr_log2
+ .endif
+ loop \at, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * restart loop. registers must be unchanged
+ */
+
+ .macro __loop as
+
+#if XCHAL_HAVE_LOOPS
+ loop \as, 99f
+#else
+ 98:
+#endif
+
+ .endm
+
+/*
+ * end of loop with no increment of the address.
+ */
+
+ .macro __endl ar, as
+#if !XCHAL_HAVE_LOOPS
+ bltu \ar, \as, 98b
+#endif
+ 99:
+ .endm
+
+/*
+ * end of loop with increment of the address.
+ */
+
+ .macro __endla ar, as, incr
+ addi \ar, \ar, \incr
+ __endl \ar \as
+ .endm
+
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract unaligned word that is split between two registers w0 and w1
+ * into r regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
+ * word starting at r from two registers loaded from consecutive aligned
+ * addresses covering r regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
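+
+/*
+ * Illustrative sequence (a sketch; a3 is assumed to already hold a2
+ * rounded down to a word boundary):
+ *
+ *	__ssa8	a2		# shift amount from the unaligned address
+ *	l32i	a4, a3, 0	# two aligned words covering a2
+ *	l32i	a5, a3, 4
+ *	__src_b	a6, a4, a5	# a6 = the 32-bit word starting at a2
+ */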
+
+#endif /* _XTENSA_ASMMACRO_H */
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
new file mode 100644
index 000000000..7de0149e1
--- /dev/null
+++ b/arch/xtensa/include/asm/atomic.h
@@ -0,0 +1,205 @@
+/*
+ * include/asm-xtensa/atomic.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ATOMIC_H
+#define _XTENSA_ATOMIC_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+/*
+ * This Xtensa implementation assumes that the right mechanism
+ * for exclusion is locking interrupts to level EXCM_LEVEL.
+ *
+ * Locking interrupts looks like this:
+ *
+ * rsil a15, TOPLEVEL
+ * <code>
+ * wsr a15, PS
+ * rsync
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * must not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) READ_ONCE((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#if XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ " " #op " %0, %0, %2\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15, "__stringify(TOPLEVEL)"\n"\
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t * v) \
+{ \
+ unsigned int tmp, vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
+ " l32i %0, %3, 0\n" \
+ " " #op " %1, %0, %2\n" \
+ " s32i %1, %3, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
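+/*
+ * The two lines above expand, via ATOMIC_OP, ATOMIC_OP_RETURN and
+ * ATOMIC_FETCH_OP, into atomic_add(), atomic_add_return(),
+ * atomic_fetch_add() and their "sub" counterparts.
+ */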
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 000000000..956596e4d
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
new file mode 100644
index 000000000..d34901897
--- /dev/null
+++ b/arch/xtensa/include/asm/bitops.h
@@ -0,0 +1,237 @@
+/*
+ * include/asm-xtensa/bitops.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting etc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BITOPS_H
+#define _XTENSA_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/processor.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#if XCHAL_HAVE_NSA
+
+static inline unsigned long __cntlz (unsigned long x)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
+ return lz;
+}
+
+/*
+ * ffz: Find first zero in word. Undefined if no zero exists.
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+static inline int ffz(unsigned long x)
+{
+ return 31 - __cntlz(~x & -~x);
+}
+
+/*
+ * __ffs: Find first bit set in word. Return 0 for bit 0
+ */
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ return 31 - __cntlz(x & -x);
+}
+
+/*
+ * ffs: Find first bit set in word. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static inline int ffs(unsigned long x)
+{
+ return 32 - __cntlz(x & -x);
+}
+
+/*
+ * fls: Find last (most-significant) bit set in word.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls (unsigned int x)
+{
+ return 32 - __cntlz(x);
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return 31 - __cntlz(word);
+}
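+
+/*
+ * Worked example: for x = 0x50 (bits 4 and 6 set), nsau reports 25
+ * leading zeros, so fls(x) = 32 - 25 = 7, __fls(x) = 31 - 25 = 6 and
+ * __ffs(x) = 31 - __cntlz(0x10) = 4.
+ */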
+#else
+
+/* Use the generic implementation if we don't have the nsa/nsau instructions. */
+
+# include <asm-generic/bitops/ffs.h>
+# include <asm-generic/bitops/__ffs.h>
+# include <asm-generic/bitops/ffz.h>
+# include <asm-generic/bitops/fls.h>
+# include <asm-generic/bitops/__fls.h>
+
+#endif
+
+#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_BITOPS_H */
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
new file mode 100644
index 000000000..892aab399
--- /dev/null
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/bootparam.h
+ *
+ * Definition of the Linux/Xtensa boot parameter structure
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * (Concept borrowed from the 68K port)
+ */
+
+#ifndef _XTENSA_BOOTPARAM_H
+#define _XTENSA_BOOTPARAM_H
+
+#define BP_VERSION 0x0001
+
+#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
+#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
+#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
+#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
+
+#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
+#define BP_TAG_LAST 0x7E0B /* last tag */
+
+#ifndef __ASSEMBLY__
+
+/* All records are aligned to 4 bytes */
+
+typedef struct bp_tag {
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure*/
+ unsigned long data[0]; /* data */
+} bp_tag_t;
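+
+/*
+ * Illustrative layout (an assumption, not spelled out here): the boot
+ * loader passes a sequence of such records, opened by a BP_TAG_FIRST
+ * tag carrying BP_VERSION and closed by a BP_TAG_LAST tag, with each
+ * record padded to a 4-byte boundary.
+ */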
+
+struct bp_meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+};
+
+#define MEMORY_TYPE_CONVENTIONAL 0x1000
+#define MEMORY_TYPE_NONE 0x2000
+
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h
new file mode 100644
index 000000000..69b29d198
--- /dev/null
+++ b/arch/xtensa/include/asm/bugs.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/bugs.h
+ *
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Xtensa processors don't have any bugs. :)
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_BUGS_H
+#define _XTENSA_BUGS_H
+
+static void check_bugs(void) { }
+
+#endif /* _XTENSA_BUGS_H */
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
new file mode 100644
index 000000000..d2fd932fd
--- /dev/null
+++ b/arch/xtensa/include/asm/cache.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <variant/core.h>
+
+#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
+#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
+#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
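+
+/* Example: a 16KB two-way data cache gives DCACHE_WAY_SIZE = 8KB; with
+ * an 8KB two-way instruction cache, CACHE_WAY_SIZE is then 8KB as well.
+ */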
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif /* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
new file mode 100644
index 000000000..34545ecfd
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -0,0 +1,214 @@
+/*
+ * include/asm-xtensa/cacheasm.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+#include <asm/cache.h>
+#include <asm/asmmacro.h>
+#include <linux/stringify.h>
+
+/*
+ * Define cache functions as macros here so that they can be used
+ * by the kernel and boot loader. We should consider moving them to a
+ * library that can be linked by both.
+ *
+ * Locking
+ *
+ * ___unlock_dcache_all
+ * ___unlock_icache_all
+ *
+ * Flushing and invalidating
+ *
+ * ___flush_invalidate_dcache_{all|range|page}
+ * ___flush_dcache_{all|range|page}
+ * ___invalidate_dcache_{all|range|page}
+ * ___invalidate_icache_{all|range|page}
+ *
+ */
+
+
+ .macro __loop_cache_unroll ar at insn size line_width max_immed
+
+ .if (1 << (\line_width)) > (\max_immed)
+ .set _reps, 1
+ .elseif (2 << (\line_width)) > (\max_immed)
+ .set _reps, 2
+ .else
+ .set _reps, 4
+ .endif
+
+ __loopi \ar, \at, \size, (_reps << (\line_width))
+ .set _index, 0
+ .rep _reps
+ \insn \ar, _index << (\line_width)
+ .set _index, _index + 1
+ .endr
+ __endla \ar, \at, _reps << (\line_width)
+
+ .endm
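+
+/*
+ * Example: with a 32-byte line (line_width == 5) and max_immed == 240,
+ * _reps is 4, so each __loopi iteration issues the cache instruction
+ * at offsets 0, 32, 64 and 96 and advances by 128 bytes.
+ */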
+
+
+ .macro __loop_cache_all ar at insn size line_width max_immed
+
+ movi \ar, 0
+ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+
+ .macro __loop_cache_range ar as at insn line_width
+
+ extui \at, \ar, 0, \line_width
+ add \as, \as, \at
+
+ __loops \ar, \as, \at, \line_width
+ \insn \ar, 0
+ __endla \ar, \at, (1 << (\line_width))
+
+ .endm
+
+
+ .macro __loop_cache_page ar at insn line_width max_immed
+
+ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+
+ .macro ___unlock_dcache_all ar at
+
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___unlock_icache_all ar at
+
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 240
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_all ar at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+ XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_all ar at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+ XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_range ar as at
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_range ar as at
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
+
+ .endm
+
+
+
+ .macro ___flush_invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___flush_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_dcache_page ar as
+
+#if XCHAL_DCACHE_SIZE
+ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+#endif
+
+ .endm
+
+
+ .macro ___invalidate_icache_page ar as
+
+#if XCHAL_ICACHE_SIZE
+ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+#endif
+
+ .endm
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
new file mode 100644
index 000000000..a0d50be5a
--- /dev/null
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -0,0 +1,182 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHEFLUSH_H
+#define _XTENSA_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Low-level routines for cache flushing.
+ *
+ * invalidate data or instruction cache:
+ *
+ * __invalidate_icache_all()
+ * __invalidate_icache_page(adr)
+ * __invalidate_dcache_page(adr)
+ * __invalidate_icache_range(from,size)
+ * __invalidate_dcache_range(from,size)
+ *
+ * flush data cache:
+ *
+ * __flush_dcache_page(adr)
+ *
+ * flush and invalidate data cache:
+ *
+ * __flush_invalidate_dcache_all()
+ * __flush_invalidate_dcache_page(adr)
+ * __flush_invalidate_dcache_range(from,size)
+ *
+ * specials for cache aliasing:
+ *
+ * __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_icache_page_alias(vaddr,paddr)
+ */
+
+extern void __invalidate_dcache_all(void);
+extern void __invalidate_icache_all(void);
+extern void __invalidate_dcache_page(unsigned long);
+extern void __invalidate_icache_page(unsigned long);
+extern void __invalidate_icache_range(unsigned long, unsigned long);
+extern void __invalidate_dcache_range(unsigned long, unsigned long);
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_invalidate_dcache_all(void);
+extern void __flush_dcache_page(unsigned long);
+extern void __flush_dcache_range(unsigned long, unsigned long);
+extern void __flush_invalidate_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
+#else
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
+#endif
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
+extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
+#else
+static inline void __invalidate_icache_page_alias(unsigned long virt,
+ unsigned long phys) { }
+#endif
+
+/*
+ * We have physically tagged caches - nothing to do here -
+ * unless we have cache aliasing.
+ *
+ * Pages can get remapped. Because this might change the 'color' of that page,
+ * we have to flush the cache before the PTE is changed.
+ * (see also Documentation/core-api/cachetlb.rst)
+ */
+
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
+
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
+
+#define local_flush_cache_all() \
+ do { \
+ __flush_invalidate_dcache_all(); \
+ __invalidate_icache_all(); \
+ } while (0)
+
+#define flush_cache_mm(mm) flush_cache_all()
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vunmap(start,end) flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page*);
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
+
+#else
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+
+#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+
+#endif
+
+/* Ensure consistency between data and instruction cache. */
+#define local_flush_icache_range(start, end) \
+ do { \
+ __flush_dcache_range(start, (end) - (start)); \
+		__invalidate_icache_range(start, (end) - (start));	\
+ } while (0)
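A minimal usage sketch of the macro above (publish_code, buf and len are hypothetical, not part of the header): after storing instructions through the data cache, the dirty lines must be written back and the stale instruction-cache lines discarded before the new code runs.

	static void publish_code(void *buf, unsigned long len)
	{
		unsigned long start = (unsigned long)buf;

		/* instructions have just been written through buf */
		local_flush_icache_range(start, start + len);
	}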
+
+/* This is not required, see Documentation/core-api/cachetlb.rst */
+#define flush_icache_page(vma,page) do { } while (0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+extern void copy_to_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+extern void copy_from_user_page(struct vm_area_struct*, struct page*,
+ unsigned long, void*, const void*, unsigned long);
+
+#else
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ __flush_dcache_range((unsigned long) dst, len); \
+ __invalidate_icache_range((unsigned long) dst, len); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif
+
+#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
new file mode 100644
index 000000000..3ae74d7e0
--- /dev/null
+++ b/arch/xtensa/include/asm/checksum.h
@@ -0,0 +1,254 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+#include <variant/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * The same as csum_partial, but copies from src while it checksums,
+ * and handles user-space pointer exceptions correctly when needed.
+ *
+ * Here it is even more important to align src and dst on a 32-bit
+ * (or, even better, a 64-bit) boundary.
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the access_ok().
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+}
+
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ return csum_partial_copy_generic((__force const void *)src, dst,
+ len, sum, err_ptr, NULL);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static __inline__ __sum16 csum_fold(__wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("extui %1, %0, 16, 16\n\t"
+		"extui	%0, %0, 0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "slli %1, %0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "extui %0, %0, 16, 16\n\t"
+ "neg %0, %0\n\t"
+ "addi %0, %0, -1\n\t"
+ "extui %0, %0, 0, 16\n\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "0" (sum));
+ return (__force __sum16)sum;
+}
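For reference, the folding done by the assembly above is equivalent to this portable C sketch (csum_fold_c is a hypothetical name, not part of the header):

	static inline __sum16 csum_fold_c(__wsum csum)
	{
		unsigned int sum = (__force unsigned int)csum;

		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
		sum += sum >> 16;			/* fold the final carry */
		return (__force __sum16)~sum;		/* one's complement result */
	}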
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * whose length is always a multiple of 4 octets.
+ */
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum, tmp, endaddr;
+
+ __asm__ __volatile__(
+ "sub %0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+ "loopgtz %2, 2f\n\t"
+#else
+ "beqz %2, 2f\n\t"
+ "slli %4, %2, 2\n\t"
+ "add %4, %4, %1\n\t"
+ "0:\t"
+#endif
+ "l32i %3, %1, 0\n\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "addi %1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+ "blt %1, %4, 0b\n\t"
+#endif
+ "2:\t"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+#ifdef __XTENSA_EL__
+ unsigned long len_proto = (len + proto) << 8;
+#elif defined(__XTENSA_EB__)
+ unsigned long len_proto = len + proto;
+#else
+# error processor byte order undefined!
+#endif
+ __asm__("add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %2\n\t"
+ "bgeu %0, %2, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=r" (len_proto)
+ : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum sum)
+{
+ unsigned int __dummy;
+ __asm__("l32i %1, %2, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %4\n\t"
+ "bgeu %0, %4, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %5\n\t"
+ "bgeu %0, %5, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len,
+ __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_WRITE, dst, len))
+		return csum_partial_copy_generic(src, dst, len, sum,
+						 NULL, err_ptr);
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return (__force __wsum)-1; /* invalid checksum */
+}
+#endif
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
new file mode 100644
index 000000000..201e9009e
--- /dev/null
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -0,0 +1,162 @@
+/*
+ * Atomic xchg and cmpxchg operations.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * cmpxchg
+ */
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+#if XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %2, scompare1\n"
+ " s32c1i %0, %1, 0\n"
+ : "+a" (new)
+ : "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " bne %0, %2, 1f\n"
+ " s32i %3, %1, 0\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4: return __cmpxchg_u32(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
+ return old;
+ }
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof (*(ptr))); \
+ })
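A typical use of cmpxchg() is a read-modify-write retry loop that only publishes its result if no other CPU raced in between; a sketch (update_max and its arguments are hypothetical):

	static void update_max(int *v, int candidate)
	{
		int old;

		do {
			old = READ_ONCE(*v);
			if (candidate <= old)
				return;		/* current value already larger */
		} while (cmpxchg(v, old, candidate) != old);
	}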
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed, and a window overflow
+ * must not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " mov %0, %3\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " s32i %2, %1, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
+}
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return xchg_u32(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_CMPXCHG_H */
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
new file mode 100644
index 000000000..677501b32
--- /dev/null
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -0,0 +1,177 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <linux/stringify.h>
+#include <variant/core.h>
+#include <variant/tie.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <variant/tie-asm.h>
+
+.macro xchal_sa_start a b
+ .set .Lxchal_pofs_, 0
+ .set .Lxchal_ofs_, 0
+.endm
+
+.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+ .set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_CC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_OPT_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+ | XTHAL_SAS_NOCC \
+ | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+ .if XTREGS_USER_SIZE > 0
+ addi \clb, \ptr, \offset
+ xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+ .endif
+.endm
+#undef _SELECT
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
+ */
+
+#define XTENSA_HAVE_COPROCESSOR(x) \
+ ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS \
+ (XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x) \
+ (XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS \
+ XCHAL_CP_PORT_MASK
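Since XCHAL_CP_MASK and XCHAL_CP_PORT_MASK are compile-time constants, these tests can guard configuration-dependent code with the ordinary preprocessor; for instance (coprocessor 0 picked arbitrarily):

	#if XTENSA_HAVE_COPROCESSOR(0)
		/* build the lazy save/restore path for coprocessor 0 */
	#endif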
+
+#ifndef __ASSEMBLY__
+
+
+#if XCHAL_HAVE_CP
+
+#define RSR_CPENABLE(x)	do {						  \
+	__asm__ __volatile__("rsr %0, cpenable" : "=a" (x));		  \
+	} while (0)
+#define WSR_CPENABLE(x)	do {						  \
+	__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x));	  \
+	} while (0)
+
+#endif /* XCHAL_HAVE_CP */
+
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ * ext: extra registers that are used by the compiler
+ * cpn: optional registers that can be used by a user application
+ * cpX: coprocessor registers that can only be used if the corresponding
+ * CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
+ __REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name) __REG0_1 ## abi (name)
+#define __REG0_10(name) __u32 name;
+#define __REG0_11(name) __u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name) __u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a)  __u32 n;
+#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+ __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+ __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+ __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+ __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+ __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+ __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+ __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+ __attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+ __attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
+extern void coprocessor_save(void*, int);
+extern void coprocessor_load(void*, int);
+extern void coprocessor_flush(struct thread_info*, int);
+extern void coprocessor_restore(struct thread_info*, int);
+
+extern void coprocessor_release_all(struct thread_info*);
+extern void coprocessor_flush_all(struct thread_info*);
+
+static inline void coprocessor_clear_cpenable(void)
+{
+ unsigned long i = 0;
+ WSR_CPENABLE(i);
+}
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _XTENSA_COPROCESSOR_H */
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
new file mode 100644
index 000000000..5d98a7ad4
--- /dev/null
+++ b/arch/xtensa/include/asm/current.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-xtensa/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CURRENT_H
+#define _XTENSA_CURRENT_H
+
+#include <asm/thread_info.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#else
+
+#define GET_CURRENT(reg,sp)		\
+	GET_THREAD_INFO(reg,sp);	\
+	l32i reg, reg, TI_TASK
+
+#endif
+
+#endif /* _XTENSA_CURRENT_H */
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
new file mode 100644
index 000000000..24304b39a
--- /dev/null
+++ b/arch/xtensa/include/asm/delay.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-xtensa/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ */
+
+#ifndef _XTENSA_DELAY_H
+#define _XTENSA_DELAY_H
+
+#include <asm/timex.h>
+#include <asm/param.h>
+
+extern unsigned long loops_per_jiffy;
+
+static inline void __delay(unsigned long loops)
+{
+ if (__builtin_constant_p(loops) && loops < 2)
+ __asm__ __volatile__ ("nop");
+ else if (loops >= 2)
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "+r" (loops));
+}
+
+/* Undefined function to get compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);
+
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long start = get_ccount();
+ unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)get_ccount()) - start < cycles)
+ cpu_relax();
+}
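The shift pair keeps the multiplication inside 32 bits and approximates one microsecond as 2^-20 seconds: cycles = (usecs * (ccount_freq >> 15)) >> 5, i.e. roughly usecs * ccount_freq / 2^20. A worked example with an assumed ccount_freq of 100 MHz: 100000000 >> 15 = 3051, so __udelay(10) spins for (10 * 3051) >> 5 = 953 cycles, a little under the exact 1000 because 2^20 slightly overestimates 10^6.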
+
+static inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+ __bad_udelay();
+ else
+ __udelay(usec);
+}
+
+static inline void __ndelay(unsigned long nsec)
+{
+ /*
+ * Inner shift makes sure multiplication doesn't overflow
+ * for legitimate nsec values
+ */
+ unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+ __delay(cycles);
+}
+
+#define ndelay(n) ndelay(n)
+
+static inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+ __bad_ndelay();
+ else
+ __ndelay(nsec);
+}
+
+#endif
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
new file mode 100644
index 000000000..bb099a373
--- /dev/null
+++ b/arch/xtensa/include/asm/dma.h
@@ -0,0 +1,62 @@
+/*
+ * include/asm-xtensa/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_H
+#define _XTENSA_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+/*
+ * This is only to be defined if we have PC-like DMA.
+ * By default this is not true on an Xtensa processor,
+ * however on boards with a PCI bus, such functionality
+ * might be emulated externally.
+ *
+ * NOTE: there still exists driver code that assumes
+ * this is defined, e.g. drivers/sound/soundcard.c (as of 2.4).
+ */
+#define MAX_DMA_CHANNELS 8
+
+/*
+ * The maximum virtual address to which DMA transfers
+ * can be performed on this platform.
+ *
+ * NOTE: This is board (platform) specific, not processor-specific!
+ *
+ * NOTE: This assumes DMA transfers can only be performed on
+ * the section of physical memory contiguously mapped in virtual
+ * space for the kernel. For the Xtensa architecture, this
+ * means the maximum possible size of this DMA area is
+ * the size of the statically mapped kernel segment
+ * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), i.e. 128 MB.
+ *
+ * NOTE: When the entire KSEG area is DMA capable, we subtract
+ * one from the max address so that the virt_to_phys() macro
+ * works correctly on the address (otherwise the address
+ * enters another area, and virt_to_phys() may not return
+ * the value desired).
+ */
+
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
+#endif
+
+/* Reserve and release a DMA channel */
+extern int request_dma(unsigned int dmanr, const char * device_id);
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+
+#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
new file mode 100644
index 000000000..eacb25a41
--- /dev/null
+++ b/arch/xtensa/include/asm/elf.h
@@ -0,0 +1,207 @@
+/*
+ * include/asm-xtensa/elf.h
+ *
+ * ELF register definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ELF_H
+#define _XTENSA_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/coprocessor.h>
+
+/* Xtensa processor ELF architecture-magic number */
+
+#define EM_XTENSA 94
+#define EM_XTENSA_OLD 0xABC7
+
+/* Xtensa relocations defined by the ABIs */
+
+#define R_XTENSA_NONE 0
+#define R_XTENSA_32 1
+#define R_XTENSA_RTLD 2
+#define R_XTENSA_GLOB_DAT 3
+#define R_XTENSA_JMP_SLOT 4
+#define R_XTENSA_RELATIVE 5
+#define R_XTENSA_PLT 6
+#define R_XTENSA_OP0 8
+#define R_XTENSA_OP1 9
+#define R_XTENSA_OP2 10
+#define R_XTENSA_ASM_EXPAND 11
+#define R_XTENSA_ASM_SIMPLIFY 12
+#define R_XTENSA_GNU_VTINHERIT 15
+#define R_XTENSA_GNU_VTENTRY 16
+#define R_XTENSA_DIFF8 17
+#define R_XTENSA_DIFF16 18
+#define R_XTENSA_DIFF32 19
+#define R_XTENSA_SLOT0_OP 20
+#define R_XTENSA_SLOT1_OP 21
+#define R_XTENSA_SLOT2_OP 22
+#define R_XTENSA_SLOT3_OP 23
+#define R_XTENSA_SLOT4_OP 24
+#define R_XTENSA_SLOT5_OP 25
+#define R_XTENSA_SLOT6_OP 26
+#define R_XTENSA_SLOT7_OP 27
+#define R_XTENSA_SLOT8_OP 28
+#define R_XTENSA_SLOT9_OP 29
+#define R_XTENSA_SLOT10_OP 30
+#define R_XTENSA_SLOT11_OP 31
+#define R_XTENSA_SLOT12_OP 32
+#define R_XTENSA_SLOT13_OP 33
+#define R_XTENSA_SLOT14_OP 34
+#define R_XTENSA_SLOT0_ALT 35
+#define R_XTENSA_SLOT1_ALT 36
+#define R_XTENSA_SLOT2_ALT 37
+#define R_XTENSA_SLOT3_ALT 38
+#define R_XTENSA_SLOT4_ALT 39
+#define R_XTENSA_SLOT5_ALT 40
+#define R_XTENSA_SLOT6_ALT 41
+#define R_XTENSA_SLOT7_ALT 42
+#define R_XTENSA_SLOT8_ALT 43
+#define R_XTENSA_SLOT9_ALT 44
+#define R_XTENSA_SLOT10_ALT 45
+#define R_XTENSA_SLOT11_ALT 46
+#define R_XTENSA_SLOT12_ALT 47
+#define R_XTENSA_SLOT13_ALT 48
+#define R_XTENSA_SLOT14_ALT 49
+
+/* ELF register definitions. This is needed for core dump support. */
+
+typedef unsigned long elf_greg_t;
+
+typedef struct {
+ elf_greg_t pc;
+ elf_greg_t ps;
+ elf_greg_t lbeg;
+ elf_greg_t lend;
+ elf_greg_t lcount;
+ elf_greg_t sar;
+ elf_greg_t windowstart;
+ elf_greg_t windowbase;
+ elf_greg_t threadptr;
+ elf_greg_t reserved[7+48];
+ elf_greg_t a[64];
+} xtensa_gregset_t;
+
+#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 18
+
+typedef unsigned int elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
+ xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
+
+extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
+ ( (x)->e_machine == EM_XTENSA_OLD ) )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+
+#ifdef __XTENSA_EL__
+# define ELF_DATA ELFDATA2LSB
+#elif defined(__XTENSA_EB__)
+# define ELF_DATA ELFDATA2MSB
+#else
+# error processor byte order undefined!
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ * For the moment, Xtensa provides no such string, so this is
+ * left as NULL.
+ */
+
+#define ELF_PLATFORM (NULL)
+
+/*
+ * The Xtensa processor ABI says that when the program starts, a2
+ * contains a pointer to a function which might be registered using
+ * `atexit'. This provides a means for the dynamic linker to call
+ * DT_FINI functions for shared libraries that have been loaded before
+ * the code runs.
+ *
+ * A value of 0 means we have no such handler.
+ *
+ * We might as well make sure everything else is cleared too (except
+ * for the stack pointer in a1), just to make things more
+ * deterministic. Also, clearing a0 terminates debugger backtraces.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
+
+typedef struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+#endif
+} elf_xtregs_t;
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
+
+struct task_struct;
+
+extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+
+#endif /* _XTENSA_ELF_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644
index 000000000..7e25c1b50
--- /dev/null
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -0,0 +1,85 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the start of the consistent memory region upwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN +
+ (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
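Each fixmap slot is exactly one page, so the translation is pure arithmetic; for example (assuming FIX_KMAP_BEGIN is configured in, per the enum above):

	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
	/* vaddr == FIXADDR_START + FIX_KMAP_BEGIN * PAGE_SIZE */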
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /* Check if this memory layout is broken because fixmap overlaps page
+ * table.
+ */
+ BUILD_BUG_ON(FIXADDR_START <
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel( \
+ pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+ (vaddr) \
+ )
+
+#endif
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
new file mode 100644
index 000000000..b8532d787
--- /dev/null
+++ b/arch/xtensa/include/asm/flat.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_XTENSA_FLAT_H
+#define __ASM_XTENSA_FLAT_H
+
+#include <asm/unaligned.h>
+
+#define flat_argvp_envp_on_stack() 0
+#define flat_old_ram_flag(flags) (flags)
+#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr, u32 *persistent)
+{
+ *addr = get_unaligned((__force u32 *)rp);
+ return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+ put_unaligned(addr, (__force u32 *)rp);
+ return 0;
+}
+#define flat_get_relocate_addr(rel) (rel)
+#define flat_set_persistent(relval, p) 0
+
+#endif /* __ASM_XTENSA_FLAT_H */
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
new file mode 100644
index 000000000..6c6d9a9f1
--- /dev/null
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -0,0 +1,40 @@
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#define ftrace_return_address0 ({ unsigned long a0, a1; \
+ __asm__ __volatile__ ( \
+ "mov %0, a0\n" \
+ "mov %1, a1\n" \
+ : "=r"(a0), "=r"(a1)); \
+ MAKE_PC_FROM_RA(a0, a1); })
+
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define ftrace_return_address(n) return_address(n)
+#endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 3
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
new file mode 100644
index 000000000..5bfbc1c40
--- /dev/null
+++ b/arch/xtensa/include/asm/futex.h
@@ -0,0 +1,127 @@
+/*
+ * Atomic futex routines
+ *
+ * Based on the PowerPC implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 TangoTec Ltd.
+ *
+ * Baruch Siach <baruch@tkos.co.il>
+ */
+
+#ifndef _ASM_XTENSA_FUTEX_H
+#define _ASM_XTENSA_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile( \
+ "1: l32i %0, %2, 0\n" \
+ insn "\n" \
+ " wsr %0, scompare1\n" \
+ "2: s32c1i %1, %2, 0\n" \
+ " bne %1, %0, 1b\n" \
+ " movi %1, 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: .long 3b\n" \
+ "5: l32r %0, 4b\n" \
+ " movi %1, %3\n" \
+ " jx %0\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b,5b,2b,5b\n" \
+ " .previous\n" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
+ : "memory")
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ __asm__ __volatile__ (
+ " # futex_atomic_cmpxchg_inatomic\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 4\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
+ " jx %1\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b,4b\n"
+ " .previous\n"
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
+ : "memory");
+
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_XTENSA_FUTEX_H */
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
new file mode 100644
index 000000000..04e9340ea
--- /dev/null
+++ b/arch/xtensa/include/asm/highmem.h
@@ -0,0 +1,100 @@
+/*
+ * include/asm-xtensa/highmem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_HIGHMEM_H
+#define _XTENSA_HIGHMEM_H
+
+#include <linux/wait.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE ((FIXADDR_START - \
+ (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
+#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL_EXEC
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+#define get_pkmap_color get_pkmap_color
+static inline int get_pkmap_color(struct page *page)
+{
+ return DCACHE_ALIAS(page_to_phys(page));
+}
+
+extern unsigned int last_pkmap_nr_arr[];
+
+static inline unsigned int get_next_pkmap_nr(unsigned int color)
+{
+ last_pkmap_nr_arr[color] =
+ (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK;
+ return last_pkmap_nr_arr[color] + color;
+}
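Each cache color thus owns an arithmetic progression of pkmap entries. With an assumed DCACHE_N_COLORS of 4, color 1 cycles through entries 1, 5, 9, ..., so every virtual slot handed out for that color has the same cache color as the physical page being mapped.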
+
+static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
+{
+ return pkmap_nr < DCACHE_N_COLORS;
+}
+
+static inline int get_pkmap_entries_count(unsigned int color)
+{
+ return LAST_PKMAP / DCACHE_N_COLORS;
+}
+
+extern wait_queue_head_t pkmap_map_wait_arr[];
+
+static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+{
+ return pkmap_map_wait_arr + color;
+}
+#endif
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE <
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
+
+#endif
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..9f119c1ca
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -0,0 +1,61 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef __ASM_XTENSA_HW_BREAKPOINT_H
+#define __ASM_XTENSA_HW_BREAKPOINT_H
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <uapi/linux/hw_breakpoint.h>
+
+/* Breakpoint */
+#define XTENSA_BREAKPOINT_EXECUTE 0
+
+/* Watchpoints */
+#define XTENSA_BREAKPOINT_LOAD 1
+#define XTENSA_BREAKPOINT_STORE 2
+
+struct arch_hw_breakpoint {
+ unsigned long address;
+ u16 len;
+ u16 type;
+};
+
+struct perf_event_attr;
+struct perf_event;
+struct pt_regs;
+struct task_struct;
+
+int hw_breakpoint_slots(int type);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw);
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int check_hw_breakpoint(struct pt_regs *regs);
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+#else
+
+struct task_struct;
+
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __ASM_XTENSA_HW_BREAKPOINT_H */
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
new file mode 100644
index 000000000..3ddbea759
--- /dev/null
+++ b/arch/xtensa/include/asm/hw_irq.h
@@ -0,0 +1,14 @@
+/*
+ * include/asm-xtensa/hw_irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_HW_IRQ_H
+#define _XTENSA_HW_IRQ_H
+
+#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000..10e9852b2
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,216 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMU versions.
+ *
+ * For the new MMU we also support a configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#else
+#define CA_WRITEBACK (0x4)
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+	movi	a3, 0x29	/* non-MX -- most cores use standard memory
+				 * controllers which usually can't use RCW
+				 */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
+
+	/* Step 1: invalidate the mapping at TEMP_MAPPING_VADDR. */
+
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
+ idtlb a2
+ iitlb a2
+ isync
+
+	/* Step 2: map the region at TEMP_MAPPING_VADDR to the paddr
+	 * containing this code and jump to the new mapping.
+	 */
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -XCHAL_SPANNING_WAY
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the requested static mappings. */
+
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using final mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
+ .endm
+
+ .macro initialize_cacheattr
+
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
+#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
+#endif
+
+ movi a5, XCHAL_SPANNING_WAY
+ movi a6, ~_PAGE_ATTRIB_MASK
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a8, 0x20000000
+1:
+ rdtlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ wdtlb a3, a5
+ ritlb1 a3, a5
+ xor a3, a3, a4
+ and a3, a3, a6
+ xor a3, a3, a4
+ witlb a3, a5
+
+ add a5, a5, a8
+ srli a4, a4, 4
+ bgeu a5, a8, 1b
+
+ isync
+#endif
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
new file mode 100644
index 000000000..acc5bb2cf
--- /dev/null
+++ b/arch/xtensa/include/asm/io.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-xtensa/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IO_H
+#define _XTENSA_IO_H
+
+#ifdef __KERNEL__
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/vectors.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+
+#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
+
+#ifdef CONFIG_MMU
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
+/*
+ * Return the virtual address for the specified bus memory.
+ */
+static inline void __iomem *ioremap_nocache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+		return (void __iomem *)(offset - XCHAL_KIO_PADDR +
+					XCHAL_KIO_BYPASS_VADDR);
+ else
+ return xtensa_ioremap_nocache(offset, size);
+}
+
+static inline void __iomem *ioremap_cache(unsigned long offset,
+ unsigned long size)
+{
+ if (offset >= XCHAL_KIO_PADDR
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
+		return (void __iomem *)(offset - XCHAL_KIO_PADDR +
+					XCHAL_KIO_CACHED_VADDR);
+ else
+ return xtensa_ioremap_cache(offset, size);
+}
+#define ioremap_cache ioremap_cache
+#define ioremap_nocache ioremap_nocache
+
+#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ return ioremap_nocache(offset, size);
+}
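Usage sketch (the physical address and size are made up for illustration):

	void __iomem *regs = ioremap(0xf0000000, 0x1000);

	if (regs) {
		u32 id = readl(regs);	/* read a 32-bit device register */

		pr_info("device id %08x\n", id);
		iounmap(regs);
	}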
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+ unsigned long va = (unsigned long) addr;
+
+ if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+ va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+ !(va >= XCHAL_KIO_BYPASS_VADDR &&
+ va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+ xtensa_iounmap(addr);
+}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+#endif /* CONFIG_MMU */
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/io.h>
+
+#endif /* _XTENSA_IO_H */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
new file mode 100644
index 000000000..6c6ed23e0
--- /dev/null
+++ b/arch/xtensa/include/asm/irq.h
@@ -0,0 +1,42 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <linux/init.h>
+#include <variant/core.h>
+
+#ifdef CONFIG_PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
+#else
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+	return irq;
+}
+
+struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);
+
+#endif /* _XTENSA_IRQ_H */
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
new file mode 100644
index 000000000..407606e57
--- /dev/null
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -0,0 +1,82 @@
+/*
+ * Xtensa IRQ flags handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_IRQFLAGS_H
+#define _XTENSA_IRQFLAGS_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("rsr %0, ps" : "=a" (flags));
+ return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+ : "=a" (flags) :: "memory");
+#endif
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+ asm volatile("rsil %0, 0" : "=a" (flags) :: "memory");
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("wsr %0, ps; rsync"
+ :: "a" (flags) : "memory");
+}
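These primitives back the generic local_irq_save()/local_irq_restore() pair, and nest safely because the entire saved PS word is restored verbatim; the usual pattern is:

	unsigned long flags;

	flags = arch_local_irq_save();	/* raise intlevel to LOCKLEVEL */
	/* critical section, interrupts masked */
	arch_local_irq_restore(flags);	/* back to the saved level */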
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _XTENSA_IRQFLAGS_H */
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000..216b6f32c
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
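Every 8 bytes of covered memory map to one shadow byte; the translation the generic KASAN code performs with these constants amounts to the following (mem_to_shadow below is an illustrative sketch, not an extra API):

	static inline void *mem_to_shadow(unsigned long addr)
	{
		return (void *)((addr >> KASAN_SHADOW_SCALE_SHIFT) +
				KASAN_SHADOW_OFFSET);
	}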
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000..9c12babc0
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,75 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#endif
+
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
+#endif
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000..0ba997323
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
new file mode 100644
index 000000000..71afe418d
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_H
+#define _XTENSA_MMU_H
+
+#ifndef CONFIG_MMU
+#include <asm-generic/mmu.h>
+#else
+
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ unsigned int cpu;
+} mm_context_t;
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
new file mode 100644
index 000000000..de5e6cbba
--- /dev/null
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -0,0 +1,159 @@
+/*
+ * Switch an MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_CONTEXT_H
+#define _XTENSA_MMU_CONTEXT_H
+
+#ifndef CONFIG_MMU
+#include <asm/nommu_context.h>
+#else
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+
+#include <asm/vectors.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
+
+#if (XCHAL_HAVE_TLBS != 1)
+# error "Linux must have an MMU!"
+#endif
+
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * NO_CONTEXT is the invalid ASID value that we don't ever assign to
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
+ *
+ * 0 invalid
+ * 1 kernel
+ * 2 reserved
+ * 3 reserved
+ * 4...255 available
+ */
+
+#define NO_CONTEXT 0
+#define ASID_USER_FIRST 4
+#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
+#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
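+
+/* For example, ASID_INSERT(4) yields 0x03020401: ring 0 (kernel) keeps
+ * ASID 1, ring 1 (user) gets ASID 4, and rings 2 and 3 hold the reserved
+ * ASIDs 2 and 3. */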
+
+void init_mmu(void);
+void init_kio(void);
+
+static inline void set_rasid_register (unsigned long val)
+{
+ __asm__ __volatile__ (" wsr %0, rasid\n\t"
+ " isync\n" : : "a" (val));
+}
+
+static inline unsigned long get_rasid_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long asid = cpu_asid_cache(cpu);
+ if ((++asid & ASID_MASK) == 0) {
+ /*
+ * Start new asid cycle; continue counting with next
+		 * incarnation bits, skipping over 0, 1, 2, 3.
+ */
+ local_flush_tlb_all();
+ asid += ASID_USER_FIRST;
+ }
+ cpu_asid_cache(cpu) = asid;
+ mm->context.asid[cpu] = asid;
+ mm->context.cpu = cpu;
+}
+
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+ /*
+ * Check if our ASID is of an older version and thus invalid.
+ */
+
+ if (mm) {
+ unsigned long asid = mm->context.asid[cpu];
+
+ if (asid == NO_CONTEXT ||
+ ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+ get_new_mmu_context(mm, cpu);
+ }
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+ get_mmu_context(mm, cpu);
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ invalidate_page_directory();
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
+ */
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ }
+ mm->context.cpu = -1;
+ return 0;
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+ int migrated = next->context.cpu != cpu;
+ /* Flush the icache if we migrated to a new core. */
+ if (migrated) {
+ __invalidate_icache_all();
+ next->context.cpu = cpu;
+ }
+ if (migrated || prev != next)
+ activate_context(next, cpu);
+}
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ invalidate_page_directory();
+}
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ /* Nothing to do. */
+
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _XTENSA_MMU_CONTEXT_H */
diff --git a/arch/xtensa/include/asm/module.h b/arch/xtensa/include/asm/module.h
new file mode 100644
index 000000000..488b40c6f
--- /dev/null
+++ b/arch/xtensa/include/asm/module.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/module.h
+ *
+ * This file contains the module code specific to the Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MODULE_H
+#define _XTENSA_MODULE_H
+
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
+#include <asm-generic/module.h>
+
+#endif /* _XTENSA_MODULE_H */
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h
new file mode 100644
index 000000000..73dcc5456
--- /dev/null
+++ b/arch/xtensa/include/asm/mxregs.h
@@ -0,0 +1,46 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MXREGS_H
+#define _XTENSA_MXREGS_H
+
+/*
+ * RER/WER at, as Read/write external register
+ * at: value
+ * as: address
+ *
+ * Address Value
+ * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
+ * 01pp 0...0d..d 16 bits (d) ORed as a single IPI to processor p
+ * 0180 0...0m..m Clear enable specified by mask (m)
+ * 0184 0...0m..m Set enable specified by mask (m)
+ * 0190 0...0x..x 8-bit IPI partition register
+ * VVVVVVVVVVPPPPUUUUUUUUUUUUUUUUUU
+ * V (10-bit) Release/Version
+ * P ( 4-bit) Number of cores - 1
+ * U (18-bit) ID
+ * 01a0 i.......i 32-bit ConfigID
+ * 0200 0...0m..m RunStall core 'n'
+ * 0220 c Cache coherency enabled
+ */
+
+#define MIROUT(irq) (0x000 + (irq))
+#define MIPICAUSE(cpu) (0x100 + (cpu))
+#define MIPISET(cause) (0x140 + (cause))
+#define MIENG 0x180
+#define MIENGSET 0x184
+#define MIASG 0x188 /* Read Global Assert Register */
+#define MIASGSET 0x18c /* Set Global Assert Register */
+#define MIPIPART 0x190
+#define SYSCFGID 0x1a0
+#define MPSCORE 0x200
+#define CCON 0x220
+
+#endif /* _XTENSA_MXREGS_H */
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
new file mode 100644
index 000000000..37251b2ef
--- /dev/null
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+static inline void init_mmu(void)
+{
+}
+
+static inline void init_kio(void)
+{
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
+}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
new file mode 100644
index 000000000..09c56cba4
--- /dev/null
+++ b/arch/xtensa/include/asm/page.h
@@ -0,0 +1,199 @@
+/*
+ * include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/cache.h>
+#include <asm/kmem_layout.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
+#else
+#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
+#endif
+
+/*
+ * Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * | |cache| cache index
+ * | pfn |off| virtual address
+ * |xxxx:X|zzz|
+ * | : | |
+ * | \ / | |
+ * |trans.| |
+ * | / \ | |
+ * |yyyy:Y|zzz| physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are part of the cache index are also translated (Y).
+ * If this translation changes bit(s) (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
+# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
+# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
+#else
+# define DCACHE_ALIAS_ORDER 0
+# define DCACHE_ALIAS(a) ((void)(a), 0)
+#endif
+#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER)
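+
+/* For instance, with a 16 KiB d-cache way and 4 KiB pages this gives
+ * DCACHE_ALIAS_ORDER == 2, i.e. four page colors to keep coherent. */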
+
+#if ICACHE_WAY_SIZE > PAGE_SIZE
+# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
+# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
+# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
+# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
+#else
+# define ICACHE_ALIAS_ORDER 0
+#endif
+
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ * Use the 'nsau' instruction if supported by the processor, otherwise fall
+ * back to the generic version.
+ */
+
+#if XCHAL_HAVE_NSA
+
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
+ return 32 - lz;
+}
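+
+/* e.g. get_order(2 * PAGE_SIZE): (size - 1) >> PAGE_SHIFT == 1,
+ * nsau(1) == 31, so the order is 32 - 31 == 1. */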
+
+#else
+
+# include <asm-generic/getorder.h>
+
+#endif
+
+struct page;
+struct vm_area_struct;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
+extern void clear_page_alias(void *vaddr, unsigned long paddr);
+extern void copy_page_alias(void *to, void *from,
+ unsigned long to_paddr, unsigned long from_paddr);
+
+#define clear_user_highpage clear_user_highpage
+void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+#else
+# define clear_user_page(page, vaddr, pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel address, not user
+ * addresses.
+ */
+
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+ return off + PHYS_OFFSET;
+}
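+/* The subtraction above folds the uncached (bypass) KSEG window, which
+ * sits XCHAL_KSEG_SIZE above the cached one, onto the same physical
+ * address. */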
+#define __pa(x) ___pa((unsigned long)(x))
+#else
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+#define pfn_valid(pfn) \
+ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#ifdef CONFIG_DISCONTIGMEM
+# error CONFIG_DISCONTIGMEM not supported
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
new file mode 100644
index 000000000..0b68c76ec
--- /dev/null
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -0,0 +1,88 @@
+/*
+ * include/asm-xtensa/pci-bridge.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_BRIDGE_H
+#define _XTENSA_PCI_BRIDGE_H
+
+#ifdef __KERNEL__
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pciauto_bus_scan() enumerates the PCI space.
+ */
+
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+struct pci_space {
+ unsigned long start;
+ unsigned long end;
+ unsigned long base;
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+
+struct pci_controller {
+ int index; /* used for pci_controller_num */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct pci_space io_space;
+ struct pci_space mem_space;
+
+ /* Return the interrupt number for a device. */
+ int (*map_irq)(struct pci_dev*, u8, u8);
+
+};
+
+static inline void pcibios_init_resource(struct resource *res,
+ unsigned long start, unsigned long end, int flags, char *name)
+{
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+ res->name = name;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+}
+
+
+/* These are used for config access before all the PCI probing has been done. */
+int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
+int early_read_config_word(struct pci_controller*, int, int, int, u16*);
+int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
+int early_write_config_byte(struct pci_controller*, int, int, int, u8);
+int early_write_config_word(struct pci_controller*, int, int, int, u16);
+int early_write_config_dword(struct pci_controller*, int, int, int, u32);
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
new file mode 100644
index 000000000..883024054
--- /dev/null
+++ b/arch/xtensa/include/asm/pci.h
@@ -0,0 +1,53 @@
+/*
+ * linux/include/asm-xtensa/pci.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_H
+#define _XTENSA_PCI_H
+
+#ifdef __KERNEL__
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader
+ */
+
+#define pcibios_assign_all_busses() 0
+
+/* Assume some values. (We should revise them, if necessary) */
+
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Dynamic DMA mapping stuff.
+ * Xtensa has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+/* The PCI address space equals the physical memory address space. The
+ * networking and block device layers historically used this fact for
+ * bounce buffer decisions.
+ */
+
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
+
+#endif /* __KERNEL__ */
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h
new file mode 100644
index 000000000..5aa4590ac
--- /dev/null
+++ b/arch/xtensa/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_XTENSA_PERF_EVENT_H
+#define __ASM_XTENSA_PERF_EVENT_H
+
+#endif /* __ASM_XTENSA_PERF_EVENT_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
new file mode 100644
index 000000000..1065bc8bc
--- /dev/null
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -0,0 +1,85 @@
+/*
+ * include/asm-xtensa/pgalloc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef __KERNEL__
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+
+/*
+ * Allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pmd_populate_kernel(mm, pmdp, ptep) \
+ (pmd_val(*(pmdp)) = ((unsigned long)ptep))
+#define pmd_populate(mm, pmdp, page) \
+ (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *ptep;
+ int i;
+
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL);
+ if (!ptep)
+ return NULL;
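+	/* one 4 kB page holds 1024 (PTRS_PER_PTE) entries; clear them all */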
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, ptep + i);
+ return ptep;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long addr)
+{
+ pte_t *pte;
+ struct page *page;
+
+ pte = pte_alloc_one_kernel(mm, addr);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
new file mode 100644
index 000000000..29cfe421c
--- /dev/null
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -0,0 +1,454 @@
+/*
+ * include/asm-xtensa/pgtable.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGTABLE_H
+#define _XTENSA_PGTABLE_H
+
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm/page.h>
+#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * We only use two ring levels, user and kernel space.
+ */
+
+#ifdef CONFIG_MMU
+#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
+#define KERNEL_RING 0 /* kernel ring level */
+
+/*
+ * The Xtensa architecture port of Linux has a two-level page table system,
+ * i.e. the logical three-level Linux page table layout is folded.
+ * Each task has the following memory page tables:
+ *
+ * PGD table (page directory), ie. 3rd-level page table:
+ * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
+ * (Architectures that don't have the PMD folded point to the PMD tables)
+ *
+ * The pointer to the PGD table for a given task can be retrieved from
+ * the task structure (struct task_struct*) t, e.g. current():
+ * (t->mm ? t->mm : t->active_mm)->pgd
+ *
+ * PMD tables (page middle-directory), ie. 2nd-level page tables:
+ * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
+ *
+ * PTE tables (page table entry), ie. 1st-level page tables:
+ * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
+ * invalid_pte_table for absent mappings.
+ *
+ * The individual pages are 4 kB in size, with a special empty_zero_page
+ * provided for zero-filled mappings.
+ */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE_SHIFT 10
+#define PTRS_PER_PGD 1024
+#define PGD_ORDER 0
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
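+
+/* A 32-bit virtual address thus splits 10/10/12: bits 31..22 index the
+ * PGD, bits 21..12 index the PTE table, bits 11..0 are the page offset. */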
+
+#ifdef CONFIG_MMU
+/*
+ * Virtual memory area. We keep a distance to other memory regions to be
+ * on the safe side. We also use this area for cache aliasing.
+ */
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
+#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE ICACHE_WAY_SIZE
+#endif
+
+#else
+
+#define VMALLOC_START __XTENSA_UL_CONST(0)
+#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
+
+#endif
+
+/*
+ * For the Xtensa architecture, the PTE layout is as follows:
+ *
+ * 31------12 11 10-9 8-6 5-4 3-2 1-0
+ * +-----------------------------------------+
+ * | | Software | HARDWARE |
+ * | PPN | ADW | RI |Attribute|
+ * +-----------------------------------------+
+ * pte_none | MBZ | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | wx |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ *
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | w1 |
+ * +-----------------------------------------+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
+ * +-----------------------------------------+
+ *
+ * Legend:
+ * PPN Physical Page Number
+ * ADW software: accessed (young) / dirty / writable
+ * RI ring (0=privileged, 1=user, 2 and 3 are unused)
+ * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
+ * (11 is invalid and used to mark pages that are not present)
+ * w page is writable (hw)
+ * x page is executable (hw)
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+ * any access (read, write, and execute).
+ * - 'multihit-exception' has the highest priority of all MMU exceptions,
+ * so the ring must be set to 'RING_USER' even for 'non-present' pages.
+ * - on older hardware, the executable flag was not supported and was
+ * used as a 'valid' flag, so it needs to be always set.
+ * - we need to keep track of certain flags in software (dirty and young)
+ * to do this, we use write exceptions and have a separate software w-flag.
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
+ */
+
+#define _PAGE_ATTRIB_MASK 0xf
+
+#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
+#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
+
+#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
+#define _PAGE_CA_WB (1<<2) /* write-back */
+#define _PAGE_CA_WT (2<<2) /* write-through */
+#define _PAGE_CA_MASK (3<<2)
+#define _PAGE_CA_INVALID (3<<2)
+
+/* We use invalid attribute values to distinguish special pte entries */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
+#define _PAGE_NONE 0x04
+#else
+#define _PAGE_HW_VALID 0x00
+#define _PAGE_NONE 0x0f
+#endif
+
+#define _PAGE_USER (1<<4) /* user access (ring=1) */
+
+/* Software */
+#define _PAGE_WRITABLE_BIT 6
+#define _PAGE_WRITABLE (1<<6) /* software: page writable */
+#define _PAGE_DIRTY (1<<7) /* software: page dirty */
+#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
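+
+/* On post-T1050 cores, for example, PAGE_SHARED works out to 0x154:
+ * writeback-cacheable (0x04), accessed (0x100), user ring (0x10) and
+ * software-writable (0x40), with the hardware write bit managed
+ * separately so that writes can be trapped to track dirty state. */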
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
+#else
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+#endif
+
+#else /* no mmu */
+
+# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+# define PAGE_NONE __pgprot(0)
+# define PAGE_SHARED __pgprot(0)
+# define PAGE_COPY __pgprot(0)
+# define PAGE_READONLY __pgprot(0)
+# define PAGE_KERNEL __pgprot(0)
+
+#endif
+
+/*
+ * On certain configurations of Xtensa MMUs (eg. the initial Linux config),
+ * the MMU can't do page protection for execute, and considers that the same as
+ * read. Also, write permissions may imply read permissions.
+ * What follows is the closest we can get by reasonable means.
+ * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
+ */
+#define __P000 PAGE_NONE /* private --- */
+#define __P001 PAGE_READONLY /* private --r */
+#define __P010 PAGE_COPY /* private -w- */
+#define __P011 PAGE_COPY /* private -wr */
+#define __P100 PAGE_READONLY_EXEC /* private x-- */
+#define __P101 PAGE_READONLY_EXEC /* private x-r */
+#define __P110 PAGE_COPY_EXEC /* private xw- */
+#define __P111 PAGE_COPY_EXEC /* private xwr */
+
+#define __S000 PAGE_NONE /* shared --- */
+#define __S001 PAGE_READONLY /* shared --r */
+#define __S010 PAGE_SHARED /* shared -w- */
+#define __S011 PAGE_SHARED /* shared -wr */
+#define __S100 PAGE_READONLY_EXEC /* shared x-- */
+#define __S101 PAGE_READONLY_EXEC /* shared x-r */
+#define __S110 PAGE_SHARED_EXEC /* shared xw- */
+#define __S111 PAGE_SHARED_EXEC /* shared xwr */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern unsigned long empty_zero_page[1024];
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#ifdef CONFIG_MMU
+extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+extern void paging_init(void);
+#else
+# define swapper_pg_dir NULL
+static inline void paging_init(void) { }
+#endif
+static inline void pgtable_cache_init(void) { }
+
+/*
+ * The pmd contains the kernel virtual address of the pte page.
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
+
+/*
+ * pte status.
+ */
+# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
+#else
+# define pte_present(pte) \
+ (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
+ || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
+#endif
+#define pte_clear(mm,addr,ptep) \
+ do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
+
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+ { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte)
+ { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+ { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+ { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)
+ { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)
+ { return pte; }
+
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/*
+ * Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void update_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+}
+
+struct mm_struct;
+
+static inline void
+set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+static inline void
+set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+}
+
+struct vm_area_struct;
+
+static inline int
+ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ update_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
+}
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,address) ((pmd_t*)(dir))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr) \
+ ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
+#define pte_unmap(pte) do { } while (0)
+
+
+/*
+ * Encode and decode a swap and file entry.
+ */
+#define SWP_TYPE_BITS 5
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 11)
+#define __swp_entry(type,offs) \
+ ((swp_entry_t){((type) << 6) | ((offs) << 11) | \
+ _PAGE_CA_INVALID | _PAGE_USER})
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
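+
+/* e.g. __swp_entry(1, 2) encodes swap type 1 at bits 6..10 and offset 2
+ * at bits 11..31, with the invalid-attribute and user bits marking the
+ * entry as not present. */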
+
+#endif /* !defined (__ASSEMBLY__) */
+
+
+#ifdef __ASSEMBLY__
+
+/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
+ * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
+ * _PTE_INDEX as C pte_index(unsigned long),
+ * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
+ *
+ * Note: We require an additional temporary register which can be the same as
+ * the register that holds the address.
+ *
+ * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
+ *
+ */
+#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
+#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
+
+#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
+ _PGD_INDEX(tmp, adr); \
+ addx4 mm, tmp, mm
+
+#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
+ srli pmd, pmd, PAGE_SHIFT; \
+ slli pmd, pmd, PAGE_SHIFT; \
+ addx4 pmd, tmp, pmd
+
+#else
+
+#define kern_addr_valid(addr) (1)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t *ptep);
+
+typedef pte_t *pte_addr_t;
+
+#endif /* !defined (__ASSEMBLY__) */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
new file mode 100644
index 000000000..560483356
--- /dev/null
+++ b/arch/xtensa/include/asm/platform.h
@@ -0,0 +1,105 @@
+/*
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the mmu is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot-loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_restart is called to restart the system.
+ */
+extern void platform_restart (void);
+
+/*
+ * platform_halt is called to stop the system and halt.
+ */
+extern void platform_halt (void);
+
+/*
+ * platform_power_off is called to stop the system and power it off.
+ */
+extern void platform_power_off (void);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_heartbeat is called every HZ
+ */
+extern void platform_heartbeat (void);
+
+/*
+ * platform_pcibios_init is called to allow the platform to setup the pci bus.
+ */
+extern void platform_pcibios_init (void);
+
+/*
+ * platform_pcibios_fixup allows the platform to modify the PCI configuration.
+ */
+extern int platform_pcibios_fixup (void);
+
+/*
+ * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
+/*
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following set of functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled. Default implementations do nothing and issue a warning.
+ */
+
+/*
+ * Check whether p points to a cached memory.
+ */
+bool platform_vaddr_cached(const void *p);
+
+/*
+ * Check whether p points to an uncached memory.
+ */
+bool platform_vaddr_uncached(const void *p);
+
+/*
+ * Return pointer to an uncached view of the cached address p.
+ */
+void *platform_vaddr_to_uncached(void *p);
+
+/*
+ * Return pointer to a cached view of the uncached address p.
+ */
+void *platform_vaddr_to_cached(void *p);
+
+#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
new file mode 100644
index 000000000..677bc76c1
--- /dev/null
+++ b/arch/xtensa/include/asm/processor.h
@@ -0,0 +1,250 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#include <variant/core.h>
+
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/regs.h>
+
+/* Assertions. */
+
+#if (XCHAL_HAVE_WINDOWED != 1)
+# error Linux requires the Xtensa Windowed Registers Option.
+#endif
+
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#ifdef CONFIG_MMU
+#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
+#else
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
+#endif
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism. To do this, an otherwise
+ * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG 63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
+#else
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
+#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#ifndef __ASSEMBLY__
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1 GB range as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
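+
+/* e.g. a call8 return to 0x10002000 is encoded as
+ * MAKE_RA_FOR_CALL(0x10002000, 2) == 0x90002000; combining that with a
+ * stack pointer in the same 1 GB region recovers the original pc. */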
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp. reg must be in the range [0..4).
+ */
+#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call8. reg must be in the range [4..8).
+ */
+#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call12. reg must be in the range [4..12).
+ */
+#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
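+
+/* SPILL_SLOT(sp, 0), for example, names the word at sp - 16, the base of
+ * the four-word register spill area under the stack pointer. */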
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+
+ /* kernel's return address and stack pointer for context switching */
+ unsigned long ra; /* kernel's a0: return address and window call size */
+ unsigned long sp; /* kernel's a1: stack pointer */
+
+ mm_segment_t current_ds; /* see uaccess.h for example uses */
+
+ /* struct xtensa_cpuinfo info; */
+
+ unsigned long bad_vaddr; /* last user fault */
+ unsigned long bad_uaddr; /* last kernel fault accessing user space */
+ unsigned long error_code;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
+ struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
+#endif
+ /* Make structure 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+};
+
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define INIT_THREAD \
+{ \
+ ra: 0, \
+ sp: sizeof(init_stack) + (long) &init_stack, \
+ current_ds: {0}, \
+ /*info: {0}, */ \
+ bad_vaddr: 0, \
+ bad_uaddr: 0, \
+ error_code: 0, \
+}
+
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Note: We set up ps as if we did a call4 to the new pc.
+ * set_thread_state in signal.c depends on it.
+ */
+#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+ memset(regs, 0, sizeof(*regs)); \
+ regs->pc = new_pc; \
+ regs->ps = USER_PS_VALUE; \
+ regs->areg[1] = new_sp; \
+ regs->areg[0] = 0; \
+ regs->wmask = 1; \
+ regs->depc = 0; \
+ regs->windowbase = 0; \
+ regs->windowstart = 1;
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
+
+#define cpu_relax() barrier()
+
+/* Special register access. */
+
+#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
+#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
+
+#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
+#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#if XCHAL_HAVE_EXTERN_REGS
+
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+ asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+ register unsigned long value;
+ asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+ return value;
+}
+
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_PROCESSOR_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
new file mode 100644
index 000000000..3a5c5918a
--- /dev/null
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,110 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+#include <asm/kmem_layout.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * exception :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <asm/coprocessor.h>
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long pc; /* 4 */
+ unsigned long ps; /* 8 */
+ unsigned long depc; /* 12 */
+ unsigned long exccause; /* 16 */
+ unsigned long excvaddr; /* 20 */
+ unsigned long debugcause; /* 24 */
+ unsigned long wmask; /* 28 */
+ unsigned long lbeg; /* 32 */
+ unsigned long lend; /* 36 */
+ unsigned long lcount; /* 40 */
+ unsigned long sar; /* 44 */
+ unsigned long windowbase; /* 48 */
+ unsigned long windowstart; /* 52 */
+ unsigned long syscall; /* 56 */
+ unsigned long icountlevel; /* 60 */
+ unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
+
+ /* Additional configurable registers that are used by the compiler. */
+ xtregs_opt_t xtregs_opt;
+
+ /* Make sure the areg field is 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+
+ /* current register frame.
+ * Note: The ESF for kernel exceptions ends after 16 registers!
+ */
+ unsigned long areg[16];
+};
+
+#include <variant/core.h>
+
+# define arch_has_single_step() (1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+# define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+ (regs)->areg[1]))
+
+# ifndef CONFIG_SMP
+# define profile_pc(regs) instruction_pointer(regs)
+# else
+# define profile_pc(regs) \
+ ({ \
+ in_lock_functions(instruction_pointer(regs)) ? \
+ return_pointer(regs) : instruction_pointer(regs); \
+ })
+# endif
+
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
+#else /* __ASSEMBLY__ */
+
+# include <asm/asm-offsets.h>
+#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
new file mode 100644
index 000000000..477594e58
--- /dev/null
+++ b/arch/xtensa/include/asm/regs.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef _XTENSA_REGS_H
+#define _XTENSA_REGS_H
+
+/* Special registers. */
+
+#define SREG_MR 32
+#define SREG_IBREAKENABLE 96
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244
+
+/* EXCCAUSE register fields */
+
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+
+#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
+#define EXCCAUSE_SYSTEM_CALL 1
+#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
+#define EXCCAUSE_LOAD_STORE_ERROR 3
+#define EXCCAUSE_LEVEL1_INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
+#define EXCCAUSE_SPECULATION 7
+#define EXCCAUSE_PRIVILEGED 8
+#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
+#define EXCCAUSE_ITLB_MISS 16
+#define EXCCAUSE_ITLB_MULTIHIT 17
+#define EXCCAUSE_ITLB_PRIVILEGE 18
+#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
+#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
+#define EXCCAUSE_DTLB_MISS 24
+#define EXCCAUSE_DTLB_MULTIHIT 25
+#define EXCCAUSE_DTLB_PRIVILEGE 26
+#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
+#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
+#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
+#define EXCCAUSE_COPROCESSOR0_DISABLED 32
+#define EXCCAUSE_COPROCESSOR1_DISABLED 33
+#define EXCCAUSE_COPROCESSOR2_DISABLED 34
+#define EXCCAUSE_COPROCESSOR3_DISABLED 35
+#define EXCCAUSE_COPROCESSOR4_DISABLED 36
+#define EXCCAUSE_COPROCESSOR5_DISABLED 37
+#define EXCCAUSE_COPROCESSOR6_DISABLED 38
+#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
+
+/* PS register fields. */
+
+#define PS_WOE_BIT 18
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_OWB_SHIFT 8
+#define PS_OWB_WIDTH 4
+#define PS_OWB_MASK 0x00000F00
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_UM_BIT 5
+#define PS_EXCM_BIT 4
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
+#define PS_INTLEVEL_MASK 0x0000000F
+
+/* DBREAKCn register fields. */
+
+#define DBREAKC_MASK_BIT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOAD_BIT 30
+#define DBREAKC_LOAD_MASK 0x40000000
+#define DBREAKC_STOR_BIT 31
+#define DBREAKC_STOR_MASK 0x80000000
+
+/* DEBUGCAUSE register fields. */
+
+#define DEBUGCAUSE_DBNUM_MASK 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8 /* First bit of DBNUM field */
+#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
+#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
+
+#endif /* _XTENSA_REGS_H */
diff --git a/arch/xtensa/include/asm/segment.h b/arch/xtensa/include/asm/segment.h
new file mode 100644
index 000000000..98964ad15
--- /dev/null
+++ b/arch/xtensa/include/asm/segment.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/segment.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SEGMENT_H
+#define _XTENSA_SEGMENT_H
+
+#include <linux/uaccess.h>
+
+#endif /* _XTENSA_SEGMENT_H */
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
new file mode 100644
index 000000000..a8a249326
--- /dev/null
+++ b/arch/xtensa/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <platform/serial.h>
+
+#endif /* _XTENSA_SERIAL_H */
diff --git a/arch/xtensa/include/asm/shmparam.h b/arch/xtensa/include/asm/shmparam.h
new file mode 100644
index 000000000..c8cc16c3d
--- /dev/null
+++ b/arch/xtensa/include/asm/shmparam.h
@@ -0,0 +1,21 @@
+/*
+ * include/asm-xtensa/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_SHMPARAM_H
+#define _XTENSA_SHMPARAM_H
+
+/*
+ * Xtensa can have variable size caches, and if
+ * the size of a single way is larger than the page size,
+ * then we have to start worrying about cache aliasing
+ * problems.
+ */
+
+#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
+
+#endif /* _XTENSA_SHMPARAM_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
new file mode 100644
index 000000000..de169b4ea
--- /dev/null
+++ b/arch/xtensa/include/asm/signal.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_SIGNAL_H
+#define _XTENSA_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#ifndef __ASSEMBLY__
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
new file mode 100644
index 000000000..4e43f5643
--- /dev/null
+++ b/arch/xtensa/include/asm/smp.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SMP_H
+#define _XTENSA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define cpu_logical_map(cpu) (cpu)
+
+struct start_info {
+ unsigned long stack;
+};
+extern struct start_info start_info;
+
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
+
+#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
new file mode 100644
index 000000000..c6e1290dc
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -0,0 +1,199 @@
+/*
+ * include/asm-xtensa/spinlock.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SPINLOCK_H
+#define _XTENSA_SPINLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * spinlock
+ *
+ * There is at most one owner of a spinlock. There are not different
+ * types of spinlock owners like there are for rwlocks (see below).
+ *
+ * When trying to obtain a spinlock, the function "spins" forever, or busy-
+ * waits, until the lock is obtained. When spinning, presumably some other
+ * owner will soon give up the spinlock making it available to others. Use
+ * the trylock functions to avoid spinning forever.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the spinlock
+ * 1 somebody owns the spinlock
+ */
+
+#define arch_spin_is_locked(x) ((x)->slock != 0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
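+
+/*
+ * Usage sketch: these arch_* primitives are normally reached through the
+ * generic <linux/spinlock.h> wrappers rather than called directly, e.g.:
+ *
+ *	static DEFINE_SPINLOCK(my_lock);
+ *
+ *	spin_lock(&my_lock);
+ *	... critical section, at most one CPU at a time ...
+ *	spin_unlock(&my_lock);
+ *
+ *	if (spin_trylock(&my_lock)) {
+ *		... acquired without spinning ...
+ *		spin_unlock(&my_lock);
+ *	}
+ */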
+
+/*
+ * rwlock
+ *
+ * Read-write locks are really a more flexible spinlock. They allow
+ * multiple readers but only one writer. Write ownership is exclusive
+ * (i.e., all other readers and writers are blocked from ownership while
+ * there is a write owner). These rwlocks are unfair to writers. Writers
+ * can be starved for an indefinite time by readers.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the rwlock
+ * >0 one or more readers own the rwlock
+ * (the positive value is the actual number of readers)
+ * 0x80000000 one writer owns the rwlock, no other writers, no readers
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " bltz %1, 1b\n"
+ " wsr %1, scompare1\n"
+ " addi %0, %1, 1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long result;
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " l32i %1, %2, 0\n"
+ " addi %0, %1, 1\n"
+ " bltz %0, 1f\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ "1:\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return result == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " addi %0, %1, -1\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp1), "=&a" (tmp2)
+ : "a" (&rw->lock)
+ : "memory");
+}
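+
+/*
+ * Usage sketch, via the generic rwlock wrappers: any number of readers
+ * may hold the lock (positive counter), while a single writer sets
+ * bit 31 and excludes everyone else:
+ *
+ *	static DEFINE_RWLOCK(my_rwlock);
+ *
+ *	read_lock(&my_rwlock);		increments rw->lock
+ *	read_unlock(&my_rwlock);	decrements rw->lock
+ *
+ *	write_lock(&my_rwlock);		sets rw->lock to 0x80000000
+ *	write_unlock(&my_rwlock);	resets rw->lock to 0
+ */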
+
+#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
new file mode 100644
index 000000000..bb1fe6c18
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000..e368f94fd
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adopted from the ARM implementation)
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
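+
+/*
+ * Conceptual sketch (not literal codegen) of what gcc emits around every
+ * instrumented function once the canary is initialized:
+ *
+ *	void foo(void)
+ *	{
+ *		unsigned long canary = __stack_chk_guard;
+ *		... function body ...
+ *		if (canary != __stack_chk_guard)
+ *			__stack_chk_fail();	never returns
+ *	}
+ */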
+
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 000000000..fe06e8ed1
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,44 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task || task == current)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
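+/*
+ * Callback sketch: walk_stackframe() calls fn for every frame and stops
+ * early when fn returns non-zero, e.g. (hypothetical helper):
+ *
+ *	static int show_frame(struct stackframe *frame, void *data)
+ *	{
+ *		pr_info(" pc: %pS\n", (void *)frame->pc);
+ *		return 0;	keep walking
+ *	}
+ *
+ *	walk_stackframe(stack_pointer(NULL), show_frame, NULL);
+ */
+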
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
new file mode 100644
index 000000000..89b51a0c7
--- /dev/null
+++ b/arch/xtensa/include/asm/string.h
@@ -0,0 +1,140 @@
+/*
+ * include/asm-xtensa/string.h
+ *
+ * These trivial string functions are considered part of the public domain.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
+
+#ifndef _XTENSA_STRING_H
+#define _XTENSA_STRING_H
+
+#define __HAVE_ARCH_STRCPY
+static inline char *strcpy(char *__dest, const char *__src)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ __asm__ __volatile__("1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "bnez %2, 1b\n\t"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ if (__n == 0)
+ return __xdest;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "bne %1, %5, 1b\n"
+ "2:"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
+ : "memory");
+
+ return __xdest;
+}
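+
+/*
+ * Note the bound above: the copy stops at the terminating NUL or after
+ * __n bytes, whichever comes first, just like standard strncpy(), e.g.:
+ *
+ *	char buf[4];
+ *	strncpy(buf, "abcdef", sizeof(buf));	buf = "abcd", no NUL!
+ */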
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char *__cs, const char *__ct)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "mov %2, %3\n"
+ "1:\n\t"
+ "beq %0, %6, 2f\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beqz %3, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %2, %3"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+/* Don't build bcopy at all ... */
+#define __HAVE_ARCH_BCOPY
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
+#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 000000000..6b73bf0eb
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev,next,last) \
+do { \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
new file mode 100644
index 000000000..3673ff1f1
--- /dev/null
+++ b/arch/xtensa/include/asm/syscall.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-xtensa/syscall.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+struct pt_regs;
+asmlinkage long xtensa_ptrace(long, long, long, long);
+asmlinkage long xtensa_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_shmat(int, char __user *, int);
+asmlinkage long xtensa_fadvise64_64(int, int,
+ unsigned long long, unsigned long long);
+
+/* Should probably move to linux/syscalls.h */
+struct pollfd;
+asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
+ fd_set __user *exp, struct timespec __user *tsp,
+ void __user *sig);
+asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
+ struct timespec __user *tsp,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644
index 000000000..552cdfd85
--- /dev/null
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -0,0 +1,19 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#include <linux/memblock.h>
+
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
new file mode 100644
index 000000000..2bd19ae61
--- /dev/null
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -0,0 +1,135 @@
+/*
+ * include/asm-xtensa/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_THREAD_INFO_H
+#define _XTENSA_THREAD_INFO_H
+
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+#ifndef __ASSEMBLY__
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct xtregs_coprocessor {
+ xtregs_cp0_t cp0;
+ xtregs_cp1_t cp1;
+ xtregs_cp2_t cp2;
+ xtregs_cp3_t cp3;
+ xtregs_cp4_t cp4;
+ xtregs_cp5_t cp5;
+ xtregs_cp6_t cp6;
+ xtregs_cp7_t cp7;
+} xtregs_coprocessor_t;
+
+#endif
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space */
+
+ unsigned long cpenable;
+
+ /* Allocate storage for extra user states and coprocessor states. */
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t xtregs_cp;
+#endif
+ xtregs_user_t xtregs_user;
+};
+
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
+ "xor %0, a1, %0" : "=&r" (ti) : );
+ return ti;
+}
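+
+/*
+ * Worked example of the mask trick above, assuming an 8 KiB kernel stack
+ * (CURRENT_SHIFT == 13): for a1 == 0xd0003a40, extui extracts the low
+ * 13 bits (0x1a40) and the xor clears them, yielding 0xd0002000, the
+ * stack base, where struct thread_info lives.
+ */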
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg,sp) \
+ extui reg, sp, 0, CURRENT_SHIFT; \
+ xor reg, sp, reg
+#endif
+
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
+#define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+
+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
+
+#endif /* _XTENSA_THREAD_INFO_H */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
new file mode 100644
index 000000000..d866bc847
--- /dev/null
+++ b/arch/xtensa/include/asm/timex.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TIMEX_H
+#define _XTENSA_TIMEX_H
+
+#include <asm/processor.h>
+#include <linux/stringify.h>
+
+#if XCHAL_NUM_TIMERS > 0 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 0
+# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 1 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 1
+# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
+#elif XCHAL_NUM_TIMERS > 2 && \
+ XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+# define LINUX_TIMER 2
+# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
+#else
+# error "Bad timer number for Linux configurations!"
+#endif
+
+extern unsigned long ccount_freq;
+
+void local_timer_setup(unsigned cpu);
+
+/*
+ * Register access.
+ */
+
+#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
+#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
+
+static inline unsigned long get_ccount (void)
+{
+ unsigned long ccount;
+ RSR_CCOUNT(ccount);
+ return ccount;
+}
+
+static inline void set_ccount (unsigned long ccount)
+{
+ WSR_CCOUNT(ccount);
+}
+
+static inline unsigned long get_linux_timer (void)
+{
+ unsigned ccompare;
+ RSR_CCOMPARE(LINUX_TIMER, ccompare);
+ return ccompare;
+}
+
+static inline void set_linux_timer (unsigned long ccompare)
+{
+ WSR_CCOMPARE(LINUX_TIMER, ccompare);
+}
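+
+/*
+ * Usage sketch: measuring an interval in cycles with ccount (assumes
+ * ccount_freq was set up by the platform/timer init code):
+ *
+ *	unsigned long t0 = get_ccount();
+ *	do_something();
+ *	unsigned long cycles = get_ccount() - t0;
+ *
+ * The unsigned subtraction stays correct across one CCOUNT wraparound.
+ */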
+
+#include <asm-generic/timex.h>
+
+#endif /* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
new file mode 100644
index 000000000..0d766f9c1
--- /dev/null
+++ b/arch/xtensa/include/asm/tlb.h
@@ -0,0 +1,47 @@
+/*
+ * include/asm-xtensa/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLB_H
+#define _XTENSA_TLB_H
+
+#include <asm/cache.h>
+#include <asm/page.h>
+
+#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
+
+/* Note, read http://lkml.org/lkml/2004/1/15/6 */
+
+# define tlb_start_vma(tlb,vma) do { } while (0)
+# define tlb_end_vma(tlb,vma) do { } while (0)
+
+#else
+
+# define tlb_start_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while(0)
+
+# define tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+ } while(0)
+
+#endif
+
+#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+
+#endif /* _XTENSA_TLB_H */
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
new file mode 100644
index 000000000..06875feb2
--- /dev/null
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLBFLUSH_H
+#define _XTENSA_TLBFLUSH_H
+
+#include <linux/stringify.h>
+#include <asm/processor.h>
+
+#define DTLB_WAY_PGD 7
+
+#define ITLB_ARF_WAYS 4
+#define DTLB_ARF_WAYS 4
+
+#define ITLB_HIT_BIT 3
+#define DTLB_HIT_BIT 4
+
+#ifndef __ASSEMBLY__
+
+/* TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(mm, vmaddr) flushes a single page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
+ end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
+
+#endif /* CONFIG_SMP */
+
+/* TLB operations. */
+
+static inline unsigned long itlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline unsigned long dtlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+static inline void invalidate_itlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
+}
+
+static inline void invalidate_dtlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
+}
+
+/* Use the .._no_isync functions with caution. Generally, these are
+ * handy for bulk invalidates followed by a single 'isync'. The
+ * caller must follow up with an 'isync', which can be relatively
+ * expensive on some Xtensa implementations.
+ */
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
+}
+
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
+}
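+
+/*
+ * Bulk-invalidate sketch using the _no_isync variants; one isync at the
+ * end covers the whole batch:
+ *
+ *	unsigned int w;
+ *	for (w = 0; w < n; w++)
+ *		invalidate_itlb_entry_no_isync(entry[w]);
+ *	__asm__ __volatile__ ("isync");
+ */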
+
+static inline void set_itlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_dtlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
+ : : "a" (val));
+}
+
+static inline void set_ptevaddr_register (unsigned long val)
+{
+ __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
+ : : "a" (val));
+}
+
+static inline unsigned long read_ptevaddr_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+static inline void write_dtlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void write_itlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("witlb %1, %0; isync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+static inline void invalidate_page_directory (void)
+{
+ invalidate_dtlb_entry (DTLB_WAY_PGD);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+1);
+ invalidate_dtlb_entry (DTLB_WAY_PGD+2);
+}
+
+static inline void invalidate_itlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
+ invalidate_itlb_entry(tlb_entry);
+}
+
+static inline void invalidate_dtlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
+ invalidate_dtlb_entry(tlb_entry);
+}
+
+#define check_pgt_cache() do { } while (0)
+
+
+/*
+ * DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
+ * ISA and exist only for test purposes.
+ * You may find them helpful for MMU debugging, however.
+ *
+ * 'at' is the unmodified input register
+ * 'as' is the output register, as follows (specific to the Linux config):
+ *
+ * as[31..12] contain the virtual address
+ * as[11..08] are meaningless
+ * as[07..00] contain the asid
+ */
+
+static inline unsigned long read_dtlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_dtlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+static inline unsigned long read_itlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000..f5cd7a7e6
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,120 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* For fast syscall handler */
+ unsigned long syscall_save;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
+ * handler must be either of the following:
+ * void (*)(struct pt_regs *regs);
+ * void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
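+
+/*
+ * Sketch: installing a C-level handler for an exception cause; the old
+ * handler is returned so it can be chained or restored:
+ *
+ *	static void my_handler(struct pt_regs *regs, unsigned long cause)
+ *	{
+ *		...
+ *	}
+ *
+ *	old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, my_handler);
+ */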
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
+void secondary_trap_init(void);
+
+static inline void spill_registers(void)
+{
+#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+ " call8 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+#if XCHAL_NUM_AREGS == 32
+ " _entry a1, 32\n"
+ " addi a8, a0, 3\n"
+ " _entry a1, 16\n"
+ " mov a12, a12\n"
+ " retw\n"
+#else
+ " _entry a1, 48\n"
+ " call12 1f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+ " _entry a1, 16\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a8, a8\n"
+#endif
+ " retw\n"
+#endif
+ "2:\n"
+ : : : "a8", "a9", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
+}
+
+struct debug_table {
+ /* Pointer to debug exception handler */
+ void (*debug_exception)(void);
+ /* Temporary register save area */
+ unsigned long debug_save[1];
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Save area for DBREAKC registers */
+ unsigned long dbreakc_save[XCHAL_NUM_DBREAK];
+ /* Saved ICOUNT register */
+ unsigned long icount_save;
+ /* Saved ICOUNTLEVEL register */
+ unsigned long icount_level_save;
+#endif
+};
+
+void debug_exception(void);
+
+#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
new file mode 100644
index 000000000..2b410b8c7
--- /dev/null
+++ b/arch/xtensa/include/asm/types.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _XTENSA_TYPES_H
+#define _XTENSA_TYPES_H
+
+#include <uapi/asm/types.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+
+#define BITS_PER_LONG 32
+
+#endif
+#endif /* _XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
new file mode 100644
index 000000000..da4effe27
--- /dev/null
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -0,0 +1,311 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic access functions to the user memory
+ * space for the kernel, such as get_user()/put_user() and the raw
+ * copy_{to,from}_user() primitives.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UACCESS_H
+#define _XTENSA_UACCESS_H
+
+#include <linux/prefetch.h>
+#include <asm/types.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should
+ * be performed or not. If get_fs() == USER_DS, checking is
+ * performed, with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are
+ * grossly misnamed.
+ */
+
+#define KERNEL_DS ((mm_segment_t) { 0 })
+#define USER_DS ((mm_segment_t) { 1 })
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.current_ds)
+#define set_fs(val) (current->thread.current_ds = (val))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define __kernel_ok (uaccess_kernel())
+#define __user_ok(addr, size) \
+ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
+/*
+ * These are the main single-value transfer routines. They
+ * automatically use the right size if we just have the right pointer
+ * type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in
+ * "get_user()" and yet we don't want to do any pointers, because that
+ * is too much of a performance impact. Thus we have a few rather ugly
+ * macros here, and hide all the ugliness from the user.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
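+
+/*
+ * Typical usage in a syscall or ioctl handler (sketch); both macros
+ * evaluate to 0 on success and -EFAULT on a bad user pointer:
+ *
+ *	int val;
+ *
+ *	if (get_user(val, (int __user *)arg))
+ *		return -EFAULT;
+ *	if (put_user(val + 1, (int __user *)arg))
+ *		return -EFAULT;
+ */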
+
+/*
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
+ case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
+ case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
+ case 8: { \
+ __typeof__(*ptr) __v64 = x; \
+ retval = __copy_to_user(ptr, &__v64, 8); \
+ break; \
+ } \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * Consider a case of a user single load/store would cause both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the
+ * user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
+ * sync.
+ */
+
+#define __check_align_1 ""
+
+#define __check_align_2 \
+ " _bbci.l %3, 0, 1f \n" \
+ " movi %0, %4 \n" \
+ " _j 2f \n"
+
+#define __check_align_4 \
+ " _bbsi.l %3, 0, 0f \n" \
+ " _bbci.l %3, 1, 1f \n" \
+ "0: movi %0, %4 \n" \
+ " _j 2f \n"
+
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __put_user_asm(x, addr, err, align, insn, cb) \
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %2, %3, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %1, 4b \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err), "=r" (cb) \
+ :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ int __cb; \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
+ case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
+ case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
+ case 8: retval = __copy_from_user(&x, ptr, 8); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __get_user_asm(x, addr, err, align, insn, cb) \
+__asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %2, %3, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %1, 4b \n" \
+ " movi %2, 0 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err), "=r" (cb), "=r" (x) \
+ :"r" (addr), "i" (-EFAULT), "0" (err))
+
+
+/*
+ * Copy to/from user space
+ */
+
+extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ return __xtensa_copy_user((__force void *)to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
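+
+/*
+ * Callers normally use the checked generic wrappers, which return the
+ * number of bytes that could NOT be copied (0 means full success):
+ *
+ *	struct foo kbuf;
+ *
+ *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
+ *		return -EFAULT;
+ */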
+
+/*
+ * We need to return the number of bytes not cleared. Our memset()
+ * returns zero if a problem occurs while accessing user-space memory.
+ * In that event, we report that no memory was cleared by returning the
+ * full size; otherwise we return zero for success.
+ */
+
+static inline unsigned long
+__xtensa_clear_user(void *addr, unsigned long size)
+{
+ if (!__memset(addr, 0, size))
+ return size;
+ return 0;
+}
+
+static inline unsigned long
+clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __xtensa_clear_user(addr, size);
+ return size ? -EFAULT : 0;
+}
+
+#define __clear_user __xtensa_clear_user
+
+
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
+extern long __strncpy_user(char *, const char *, long);
+
+static inline long
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_user(dst, src, count);
+ return -EFAULT;
+}
+#else
+long strncpy_from_user(char *dst, const char __user *src, long count);
+#endif
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char *, long);
+
+static inline long strnlen_user(const char *str, long len)
+{
+ unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len);
+}
+
+#endif /* _XTENSA_UACCESS_H */
diff --git a/arch/xtensa/include/asm/ucontext.h b/arch/xtensa/include/asm/ucontext.h
new file mode 100644
index 000000000..94c94ed3e
--- /dev/null
+++ b/arch/xtensa/include/asm/ucontext.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/ucontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UCONTEXT_H
+#define _XTENSA_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _XTENSA_UCONTEXT_H */
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
new file mode 100644
index 000000000..8e7ed046b
--- /dev/null
+++ b/arch/xtensa/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Xtensa doesn't handle unaligned accesses efficiently.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
+
+#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
new file mode 100644
index 000000000..ed66db3bc
--- /dev/null
+++ b/arch/xtensa/include/asm/unistd.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+
+#define __ARCH_WANT_SYS_CLONE
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_GETPGRP
+
+/*
+ * Ignore legacy system calls in the checksyscalls.sh script
+ */
+
+#define __IGNORE_fork /* use clone */
+#define __IGNORE_time
+#define __IGNORE_alarm /* use setitimer */
+#define __IGNORE_pause
+#define __IGNORE_mmap /* use mmap2 */
+#define __IGNORE_vfork /* use clone */
+#define __IGNORE_fadvise64 /* use fadvise64_64 */
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
new file mode 100644
index 000000000..2c3ed2335
--- /dev/null
+++ b/arch/xtensa/include/asm/user.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/user.h
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_USER_H
+#define _XTENSA_USER_H
+
+/* This file usually defines a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on Xtensa.
+ */
+
+#endif /* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000..2d3e0cca9
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+#include <asm/kmem_layout.h>
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+#if defined(CONFIG_MMU)
+
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/* Image Virtual Start Address */
+#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
+ CONFIG_KERNEL_LOAD_ADDRESS - \
+ XCHAL_KSEG_PADDR)
+#else
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+#endif
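+
+/*
+ * Worked example, assuming the common defaults (XCHAL_KSEG_CACHED_VADDR
+ * == 0xd0000000, XCHAL_KSEG_PADDR == 0x00000000 and a kernel load
+ * address of 0x00003000): KERNELOFFSET == 0xd0003000, the virtual
+ * address where the kernel image starts.
+ */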
+
+#else /* !defined(CONFIG_MMU) */
+ /* MMU Not being used - Virtual == Physical */
+
+/* Location of the start of the kernel text, _start */
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
+
+
+#endif /* CONFIG_MMU */
+
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#ifdef CONFIG_VECTORS_OFFSET
+#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
+#else
+#define VECBASE_VADDR _vecbase
+#endif
+
+#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
+
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
+
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL7_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/include/asm/vga.h b/arch/xtensa/include/asm/vga.h
new file mode 100644
index 000000000..1fd8cab3a
--- /dev/null
+++ b/arch/xtensa/include/asm/vga.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-xtensa/vga.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_VGA_H
+#define _XTENSA_VGA_H
+
+#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..837d4dd76
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -0,0 +1,13 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += kvm_para.h
+generic-y += resource.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += termios.h
diff --git a/arch/xtensa/include/uapi/asm/auxvec.h b/arch/xtensa/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..257dec75c
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __XTENSA_AUXVEC_H
+#define __XTENSA_AUXVEC_H
+
+#endif
diff --git a/arch/xtensa/include/uapi/asm/byteorder.h b/arch/xtensa/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..5b9f832c2
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/byteorder.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _XTENSA_BYTEORDER_H
+#define _XTENSA_BYTEORDER_H
+
+#ifdef __XTENSA_EL__
+#include <linux/byteorder/little_endian.h>
+#elif defined(__XTENSA_EB__)
+#include <linux/byteorder/big_endian.h>
+#else
+# error processor byte order undefined!
+#endif
+
+#endif /* _XTENSA_BYTEORDER_H */
diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
new file mode 100644
index 000000000..ec43609cb
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ioctls.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ *
+ * Derived from "include/asm-i386/ioctls.h"
+ */
+
+#ifndef _XTENSA_IOCTLS_H
+#define _XTENSA_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+
+#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
+#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
+#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
+#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
+#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL _IO('T', 12)
+#define TIOCNXCL _IO('T', 13)
+#define TIOCSCTTY _IO('T', 14)
+
+#define TIOCSTI _IOW('T', 18, char)
+#define TIOCMGET _IOR('T', 21, unsigned int)
+#define TIOCMBIS _IOW('T', 22, unsigned int)
+#define TIOCMBIC _IOW('T', 23, unsigned int)
+#define TIOCMSET _IOW('T', 24, unsigned int)
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+
+#define TIOCGSOFTCAR _IOR('T', 25, unsigned int)
+#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
+#define TIOCLINUX _IOW('T', 28, char)
+#define TIOCCONS _IO('T', 29)
+#define TIOCGSERIAL 0x803C541E /*_IOR('T', 30, struct serial_struct)*/
+#define TIOCSSERIAL 0x403C541F /*_IOW('T', 31, struct serial_struct)*/
+#define TIOCPKT _IOW('T', 32, int)
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+# define TIOCPKT_IOCTL 64
+
+
+#define TIOCNOTTY _IO('T', 34)
+#define TIOCSETD _IOW('T', 35, int)
+#define TIOCGETD _IOR('T', 36, int)
+#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
+#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
+#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
+#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+#define TIOCGRS485 _IOR('T', 46, struct serial_rs485)
+#define TIOCSRS485 _IOWR('T', 47, struct serial_rs485)
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
+#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
+#define TIOCVHANGUP _IO('T', 0x37)
+#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+
+#define TIOCSERCONFIG _IO('T', 83)
+#define TIOCSERGWILD _IOR('T', 84, int)
+#define TIOCSERSWILD _IOW('T', 85, int)
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */
+ /* _IOR('T', 90, struct serial_multiport_struct) */
+#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
+ /* _IOW('T', 91, struct serial_multiport_struct) */
+
+#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#endif /* _XTENSA_IOCTLS_H */
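As a hedged illustration of the modem-control ioctls above — a minimal userspace sketch, assuming glibc exposes TIOCMGET and the TIOCM_* bits through <sys/ioctl.h>:

    /* Read the modem-control lines of an open tty and test CTS. */
    #include <sys/ioctl.h>

    static int cts_asserted(int fd)
    {
            unsigned int bits;

            if (ioctl(fd, TIOCMGET, &bits) < 0)
                    return -1;              /* not a tty, or the ioctl failed */
            return (bits & TIOCM_CTS) != 0; /* clear-to-send line state */
    }

TIOCM_CD or TIOCM_RI can be tested the same way; TIOCMBIS and TIOCMBIC set and clear individual bits.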
diff --git a/arch/xtensa/include/uapi/asm/ipcbuf.h b/arch/xtensa/include/uapi/asm/ipcbuf.h
new file mode 100644
index 000000000..a57afa0b6
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ipcbuf.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ipcbuf.h
+ *
+ * The ipc64_perm structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IPCBUF_H
+#define _XTENSA_IPCBUF_H
+
+/*
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned long seq;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _XTENSA_IPCBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
new file mode 100644
index 000000000..58f29a9d8
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/mman.h
+ *
+ * Xtensa Processor memory-manager definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMAN_H
+#define _XTENSA_MMAN_H
+
+/*
+ * Protections are chosen from these bits, OR'd together. The
+ * implementation does not necessarily support PROT_EXEC or PROT_WRITE
+ * without PROT_READ. The only guarantees are that no writing will be
+ * allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
+ */
+
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+
+#define PROT_SEM 0x10 /* page may be used for atomic ops */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+/*
+ * Flags for mmap
+ */
+#define MAP_SHARED 0x001 /* Share changes */
+#define MAP_PRIVATE 0x002 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x003 /* share + validate extension flags */
+#define MAP_TYPE 0x00f /* Mask for type of mapping */
+#define MAP_FIXED 0x010 /* Interpret addr exactly */
+
+/* not used by linux, but here to make sure we don't clash with ABI defines */
+#define MAP_RENAME 0x020 /* Assign page to file */
+#define MAP_AUTOGROW 0x040 /* File may grow by writing */
+#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
+#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
+
+/* These are linux-specific */
+#define MAP_NORESERVE 0x0400 /* don't check for reservations */
+#define MAP_ANONYMOUS 0x0800 /* don't use a file */
+#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
+#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
+#define MAP_LOCKED 0x8000 /* pages are locked */
+#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
+
+/*
+ * Flags for msync
+ */
+#define MS_ASYNC 0x0001 /* sync memory asynchronously */
+#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
+#define MS_SYNC 0x0004 /* synchronous memory sync */
+
+/*
+ * Flags for mlockall
+ */
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+
+/*
+ * Flags for mlock
+ */
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_FREE 8 /* free pages only if memory pressure */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
+#endif /* _XTENSA_MMAN_H */
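For illustration, the protection and mapping flags above combine in the usual mmap(2) way; a minimal sketch using only standard <sys/mman.h> names:

    /* Create, touch, and release a private anonymous page. */
    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;      /* one 4 KiB page */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return EXIT_FAILURE;
            ((char *)p)[0] = 1;     /* writable, per PROT_WRITE */
            munmap(p, len);
            return EXIT_SUCCESS;
    }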
diff --git a/arch/xtensa/include/uapi/asm/msgbuf.h b/arch/xtensa/include/uapi/asm/msgbuf.h
new file mode 100644
index 000000000..d6915e9f0
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/msgbuf.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/msgbuf.h
+ *
+ * The msqid64_ds structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_MSGBUF_H
+#define _XTENSA_MSGBUF_H
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifdef __XTENSA_EB__
+ unsigned long msg_stime_high;
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_ctime_high;
+ unsigned long msg_ctime; /* last change time */
+#elif defined(__XTENSA_EL__)
+ unsigned long msg_stime; /* last msgsnd time */
+ unsigned long msg_stime_high;
+ unsigned long msg_rtime; /* last msgrcv time */
+ unsigned long msg_rtime_high;
+ unsigned long msg_ctime; /* last change time */
+ unsigned long msg_ctime_high;
+#else
+# error processor byte order undefined!
+#endif
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _XTENSA_MSGBUF_H */
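The high/low split above exists so that 32-bit userspace can carry 64-bit timestamps; a hedged sketch of reassembling one (the helper name is illustrative):

    /* Rebuild a 64-bit msgsnd timestamp from the split fields. */
    #include <stdint.h>

    static inline uint64_t msg_stime64(unsigned long stime_high,
                                       unsigned long stime)
    {
            return ((uint64_t)stime_high << 32) | (uint32_t)stime;
    }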
diff --git a/arch/xtensa/include/uapi/asm/param.h b/arch/xtensa/include/uapi/asm/param.h
new file mode 100644
index 000000000..e6feb4ee0
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/param.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/param.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PARAM_H
+#define _UAPI_XTENSA_PARAM_H
+
+#ifndef __KERNEL__
+# define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _UAPI_XTENSA_PARAM_H */
diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
new file mode 100644
index 000000000..4d249040b
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/poll.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/poll.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POLL_H
+#define _XTENSA_POLL_H
+
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 0x0100
+#define POLLREMOVE 0x0800
+
+#include <asm-generic/poll.h>
+
+#endif /* _XTENSA_POLL_H */
diff --git a/arch/xtensa/include/uapi/asm/posix_types.h b/arch/xtensa/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000..1dc675928
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/posix_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/posix_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Largely copied from include/asm-ppc/posix_types.h
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POSIX_TYPES_H
+#define _XTENSA_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+#define __kernel_size_t __kernel_size_t
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..a10b42963
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PTRACE_H
+#define _UAPI_XTENSA_PTRACE_H
+
+/* Registers used by strace */
+
+#define REG_A_BASE 0x0000
+#define REG_AR_BASE 0x0100
+#define REG_PC 0x0020
+#define REG_PS 0x02e6
+#define REG_WB 0x0248
+#define REG_WS 0x0249
+#define REG_LBEG 0x0200
+#define REG_LEND 0x0201
+#define REG_LCOUNT 0x0202
+#define REG_SAR 0x0203
+
+#define SYSCALL_NR 0x00ff
+
+/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETXTREGS 18
+#define PTRACE_SETXTREGS 19
+#define PTRACE_GETHBPREGS 20
+#define PTRACE_SETHBPREGS 21
+
+
+#endif /* _UAPI_XTENSA_PTRACE_H */
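A hedged sketch of how a tracer might use the request numbers above; PTRACE_GETREGS here is glibc's request constant, and the destination layout is architecture-specific, so a raw buffer stands in for the real register structure:

    /* Fetch the tracee's general registers into caller-supplied storage. */
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static long get_gp_regs(pid_t pid, void *buf)
    {
            return ptrace(PTRACE_GETREGS, pid, (void *)0, buf);
    }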
diff --git a/arch/xtensa/include/uapi/asm/sembuf.h b/arch/xtensa/include/uapi/asm/sembuf.h
new file mode 100644
index 000000000..09f348d64
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sembuf.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sembuf.h
+ *
+ * The semid64_ds structure for Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ */
+
+#ifndef _XTENSA_SEMBUF_H
+#define _XTENSA_SEMBUF_H
+
+#include <asm/byteorder.h>
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#ifdef __XTENSA_EL__
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_otime_high;
+ unsigned long sem_ctime; /* last change time */
+ unsigned long sem_ctime_high;
+#else
+ unsigned long sem_otime_high;
+ unsigned long sem_otime; /* last semop time */
+ unsigned long sem_ctime_high;
+ unsigned long sem_ctime; /* last change time */
+#endif
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_XTENSA_SEMBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/setup.h b/arch/xtensa/include/uapi/asm/setup.h
new file mode 100644
index 000000000..57e6c210e
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/setup.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/setup.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SETUP_H
+#define _XTENSA_SETUP_H
+
+#define COMMAND_LINE_SIZE 256
+
+extern void set_except_vector(int n, void *addr);
+
+#endif
diff --git a/arch/xtensa/include/uapi/asm/shmbuf.h b/arch/xtensa/include/uapi/asm/shmbuf.h
new file mode 100644
index 000000000..554a57a6a
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/shmbuf.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/shmbuf.h
+ *
+ * The shmid64_ds structure for Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space, but the padding is on the wrong
+ * side for big-endian xtensa, for historic reasons.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SHMBUF_H
+#define _XTENSA_SHMBUF_H
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ unsigned long shm_atime; /* last attach time */
+ unsigned long shm_atime_high;
+ unsigned long shm_dtime; /* last detach time */
+ unsigned long shm_dtime_high;
+ unsigned long shm_ctime; /* last change time */
+ unsigned long shm_ctime_high;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _XTENSA_SHMBUF_H */
diff --git a/arch/xtensa/include/uapi/asm/sigcontext.h b/arch/xtensa/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..9cbf39e63
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sigcontext.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sigcontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SIGCONTEXT_H
+#define _XTENSA_SIGCONTEXT_H
+
+
+struct sigcontext {
+ unsigned long sc_pc;
+ unsigned long sc_ps;
+ unsigned long sc_lbeg;
+ unsigned long sc_lend;
+ unsigned long sc_lcount;
+ unsigned long sc_sar;
+ unsigned long sc_acclo;
+ unsigned long sc_acchi;
+ unsigned long sc_a[16];
+ void *sc_xtregs;
+};
+
+#endif /* _XTENSA_SIGCONTEXT_H */
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
new file mode 100644
index 000000000..005dec5bf
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_SIGNAL_H
+#define _UAPI_XTENSA_SIGNAL_H
+
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#endif
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/* #define SIGLOST 29 */
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_XTENSA_SIGNAL_H */
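As a usage note, the SA_* flags above back the portable sigaction(2) interface; a minimal sketch with libc's struct sigaction (not the raw userland struct defined above):

    /* Install a restarting SIGCHLD handler that is not reset on delivery. */
    #include <signal.h>
    #include <stddef.h>

    static void on_chld(int sig)
    {
            (void)sig;              /* reap children elsewhere */
    }

    static int install_handler(void)
    {
            struct sigaction sa = { 0 };

            sa.sa_handler = on_chld;
            sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
            sigemptyset(&sa.sa_mask);
            return sigaction(SIGCHLD, &sa, NULL);
    }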
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
new file mode 100644
index 000000000..1de07a7f7
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/socket.h
+ *
+ * Copied from i386.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _XTENSA_SOCKET_H
+#define _XTENSA_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+#define SO_REUSEPORT 15
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+#define SO_GET_FILTER SO_ATTACH_FILTER
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+#define SO_MARK 36
+
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
+#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use the last 4 bytes of skb data as FCS */
+#define SO_NOFCS 43
+
+#define SO_LOCK_FILTER 44
+
+#define SO_SELECT_ERR_QUEUE 45
+
+#define SO_BUSY_POLL 46
+
+#define SO_MAX_PACING_RATE 47
+
+#define SO_BPF_EXTENSIONS 48
+
+#define SO_INCOMING_CPU 49
+
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
+#define SO_ATTACH_REUSEPORT_CBPF 51
+#define SO_ATTACH_REUSEPORT_EBPF 52
+
+#define SO_CNX_ADVICE 53
+
+#define SCM_TIMESTAMPING_OPT_STATS 54
+
+#define SO_MEMINFO 55
+
+#define SO_INCOMING_NAPI_ID 56
+
+#define SO_COOKIE 57
+
+#define SCM_TIMESTAMPING_PKTINFO 58
+
+#define SO_PEERGROUPS 59
+
+#define SO_ZEROCOPY 60
+
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
+#endif /* _XTENSA_SOCKET_H */
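For a quick illustration, these option numbers reach the kernel through setsockopt(2) at level SOL_SOCKET; a minimal sketch:

    /* Allow quick rebinding of a listening address. */
    #include <sys/socket.h>

    static int make_reusable(int sock)
    {
            int one = 1;

            return setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                              &one, sizeof(one));
    }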
diff --git a/arch/xtensa/include/uapi/asm/sockios.h b/arch/xtensa/include/uapi/asm/sockios.h
new file mode 100644
index 000000000..fb8ac3607
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/sockios.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/sockios.h
+ *
+ * Socket-level I/O control calls. Copied from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SOCKIOS_H
+#define _XTENSA_SOCKIOS_H
+
+#include <asm/ioctl.h>
+
+/* Socket-level I/O control calls. */
+
+#define FIOGETOWN _IOR('f', 123, int)
+#define FIOSETOWN _IOW('f', 124, int)
+
+#define SIOCATMARK _IOR('s', 7, int)
+#define SIOCSPGRP _IOW('s', 8, pid_t)
+#define SIOCGPGRP _IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif /* _XTENSA_SOCKIOS_H */
diff --git a/arch/xtensa/include/uapi/asm/stat.h b/arch/xtensa/include/uapi/asm/stat.h
new file mode 100644
index 000000000..7b4d90d6d
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/stat.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/stat.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_STAT_H
+#define _XTENSA_STAT_H
+
+#define STAT_HAVE_NSEC 1
+
+struct stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned long st_rdev;
+ long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct stat64 {
+ unsigned long long st_dev; /* Device */
+ unsigned long long st_ino; /* File serial number */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ long long st_size; /* Size of file, in bytes. */
+ unsigned long st_blksize; /* Optimal block size for I/O. */
+ unsigned long __unused2;
+ unsigned long long st_blocks; /* Number of 512-byte blocks allocated. */
+ unsigned long st_atime; /* Time of last access. */
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime; /* Time of last modification. */
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime; /* Time of last status change. */
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _XTENSA_STAT_H */
diff --git a/arch/xtensa/include/uapi/asm/swab.h b/arch/xtensa/include/uapi/asm/swab.h
new file mode 100644
index 000000000..e677cf4cc
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/swab.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/swab.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWAB_H
+#define _XTENSA_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __u32 res;
+ /* instruction sequence from Xtensa ISA release 2/2000 */
+ __asm__("ssai 8 \n\t"
+ "srli %0, %1, 16 \n\t"
+ "src %0, %0, %1 \n\t"
+ "src %0, %0, %0 \n\t"
+ "src %0, %1, %0 \n"
+ : "=&a" (res)
+ : "a" (x)
+ );
+ return res;
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ /* Given that 'short' values are signed (i.e., can be negative),
+ * we cannot assume that the upper 16-bits of the register are
+ * zero. We are careful to mask values after shifting.
+ */
+
+ /* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
+ * inserts an extui instruction after putting this function inline
+ * to ensure that it uses only the least-significant 16 bits of
+ * the result. xt-xcc doesn't use an extui, but assumes the
+ * __asm__ macro follows the convention that the upper 16 bits of an
+ * 'unsigned short' result are still zero. This macro doesn't
+ * follow that convention; indeed, it leaves garbage in the upper 16
+ * bits of the register.
+ *
+ * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
+ * types while the return type of the function is a 16-bit type
+ * forces both compilers to insert exactly one extui instruction
+ * (or equivalent) to mask off the upper 16 bits. */
+
+ __u32 res;
+ __u32 tmp;
+
+ __asm__("extui %1, %2, 8, 8\n\t"
+ "slli %0, %2, 8 \n\t"
+ "or %0, %0, %1 \n"
+ : "=&a" (res), "=&a" (tmp)
+ : "a" (x)
+ );
+
+ return res;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _XTENSA_SWAB_H */
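A hedged cross-check for the assembly above: a portable reference implementation that must agree with __arch_swab32, e.g. both map 0x12345678 to 0x78563412:

    /* Plain-C 32-bit byte swap, usable as a unit-test oracle. */
    #include <stdint.h>

    static inline uint32_t ref_swab32(uint32_t x)
    {
            return  (x >> 24) |
                    ((x >>  8) & 0x0000ff00u) |
                    ((x <<  8) & 0x00ff0000u) |
                    (x << 24);
    }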
diff --git a/arch/xtensa/include/uapi/asm/termbits.h b/arch/xtensa/include/uapi/asm/termbits.h
new file mode 100644
index 000000000..d4206a7c5
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/termbits.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/termbits.h
+ *
+ * Copied from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TERMBITS_H
+#define _XTENSA_TERMBITS_H
+
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+#define EXTPROC 0200000
+
+/* tcflow() and TCXONC use these */
+
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _XTENSA_TERMBITS_H */
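As a usage sketch, the termios2/BOTHER machinery above is what enables non-standard baud rates; a hedged example that talks to the tty through raw ioctls (including <asm/termbits.h> directly, since it conflicts with <termios.h>):

    /* Request an arbitrary baud rate via TCGETS2/TCSETS2 and BOTHER. */
    #include <asm/termbits.h>
    #include <sys/ioctl.h>

    static int set_custom_baud(int fd, unsigned int baud)
    {
            struct termios2 tio;

            if (ioctl(fd, TCGETS2, &tio) < 0)
                    return -1;
            tio.c_cflag &= ~(tcflag_t)CBAUD;
            tio.c_cflag |= BOTHER; /* rate comes from c_ispeed/c_ospeed */
            tio.c_ispeed = baud;
            tio.c_ospeed = baud;
            return ioctl(fd, TCSETS2, &tio);
    }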
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
new file mode 100644
index 000000000..12db8ac38
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_TYPES_H
+#define _UAPI_XTENSA_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#ifdef __ASSEMBLY__
+# define __XTENSA_UL(x) (x)
+# define __XTENSA_UL_CONST(x) x
+#else
+# define __XTENSA_UL(x) ((unsigned long)(x))
+# define ___XTENSA_UL_CONST(x) x##UL
+# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#endif
+
+#endif /* _UAPI_XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..bc3f62db5
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_XTENSA_UNISTD_H
+
+#ifndef __SYSCALL
+# define __SYSCALL(nr,func,nargs)
+#endif
+
+#define __NR_spill 0
+__SYSCALL( 0, sys_ni_syscall, 0)
+#define __NR_xtensa 1
+__SYSCALL( 1, sys_ni_syscall, 0)
+#define __NR_available4 2
+__SYSCALL( 2, sys_ni_syscall, 0)
+#define __NR_available5 3
+__SYSCALL( 3, sys_ni_syscall, 0)
+#define __NR_available6 4
+__SYSCALL( 4, sys_ni_syscall, 0)
+#define __NR_available7 5
+__SYSCALL( 5, sys_ni_syscall, 0)
+#define __NR_available8 6
+__SYSCALL( 6, sys_ni_syscall, 0)
+#define __NR_available9 7
+__SYSCALL( 7, sys_ni_syscall, 0)
+
+/* File Operations */
+
+#define __NR_open 8
+__SYSCALL( 8, sys_open, 3)
+#define __NR_close 9
+__SYSCALL( 9, sys_close, 1)
+#define __NR_dup 10
+__SYSCALL( 10, sys_dup, 1)
+#define __NR_dup2 11
+__SYSCALL( 11, sys_dup2, 2)
+#define __NR_read 12
+__SYSCALL( 12, sys_read, 3)
+#define __NR_write 13
+__SYSCALL( 13, sys_write, 3)
+#define __NR_select 14
+__SYSCALL( 14, sys_select, 5)
+#define __NR_lseek 15
+__SYSCALL( 15, sys_lseek, 3)
+#define __NR_poll 16
+__SYSCALL( 16, sys_poll, 3)
+#define __NR__llseek 17
+__SYSCALL( 17, sys_llseek, 5)
+#define __NR_epoll_wait 18
+__SYSCALL( 18, sys_epoll_wait, 4)
+#define __NR_epoll_ctl 19
+__SYSCALL( 19, sys_epoll_ctl, 4)
+#define __NR_epoll_create 20
+__SYSCALL( 20, sys_epoll_create, 1)
+#define __NR_creat 21
+__SYSCALL( 21, sys_creat, 2)
+#define __NR_truncate 22
+__SYSCALL( 22, sys_truncate, 2)
+#define __NR_ftruncate 23
+__SYSCALL( 23, sys_ftruncate, 2)
+#define __NR_readv 24
+__SYSCALL( 24, sys_readv, 3)
+#define __NR_writev 25
+__SYSCALL( 25, sys_writev, 3)
+#define __NR_fsync 26
+__SYSCALL( 26, sys_fsync, 1)
+#define __NR_fdatasync 27
+__SYSCALL( 27, sys_fdatasync, 1)
+#define __NR_truncate64 28
+__SYSCALL( 28, sys_truncate64, 2)
+#define __NR_ftruncate64 29
+__SYSCALL( 29, sys_ftruncate64, 2)
+#define __NR_pread64 30
+__SYSCALL( 30, sys_pread64, 6)
+#define __NR_pwrite64 31
+__SYSCALL( 31, sys_pwrite64, 6)
+
+#define __NR_link 32
+__SYSCALL( 32, sys_link, 2)
+#define __NR_rename 33
+__SYSCALL( 33, sys_rename, 2)
+#define __NR_symlink 34
+__SYSCALL( 34, sys_symlink, 2)
+#define __NR_readlink 35
+__SYSCALL( 35, sys_readlink, 3)
+#define __NR_mknod 36
+__SYSCALL( 36, sys_mknod, 3)
+#define __NR_pipe 37
+__SYSCALL( 37, sys_pipe, 1)
+#define __NR_unlink 38
+__SYSCALL( 38, sys_unlink, 1)
+#define __NR_rmdir 39
+__SYSCALL( 39, sys_rmdir, 1)
+
+#define __NR_mkdir 40
+__SYSCALL( 40, sys_mkdir, 2)
+#define __NR_chdir 41
+__SYSCALL( 41, sys_chdir, 1)
+#define __NR_fchdir 42
+__SYSCALL( 42, sys_fchdir, 1)
+#define __NR_getcwd 43
+__SYSCALL( 43, sys_getcwd, 2)
+
+#define __NR_chmod 44
+__SYSCALL( 44, sys_chmod, 2)
+#define __NR_chown 45
+__SYSCALL( 45, sys_chown, 3)
+#define __NR_stat 46
+__SYSCALL( 46, sys_newstat, 2)
+#define __NR_stat64 47
+__SYSCALL( 47, sys_stat64, 2)
+
+#define __NR_lchown 48
+__SYSCALL( 48, sys_lchown, 3)
+#define __NR_lstat 49
+__SYSCALL( 49, sys_newlstat, 2)
+#define __NR_lstat64 50
+__SYSCALL( 50, sys_lstat64, 2)
+#define __NR_available51 51
+__SYSCALL( 51, sys_ni_syscall, 0)
+
+#define __NR_fchmod 52
+__SYSCALL( 52, sys_fchmod, 2)
+#define __NR_fchown 53
+__SYSCALL( 53, sys_fchown, 3)
+#define __NR_fstat 54
+__SYSCALL( 54, sys_newfstat, 2)
+#define __NR_fstat64 55
+__SYSCALL( 55, sys_fstat64, 2)
+
+#define __NR_flock 56
+__SYSCALL( 56, sys_flock, 2)
+#define __NR_access 57
+__SYSCALL( 57, sys_access, 2)
+#define __NR_umask 58
+__SYSCALL( 58, sys_umask, 1)
+#define __NR_getdents 59
+__SYSCALL( 59, sys_getdents, 3)
+#define __NR_getdents64 60
+__SYSCALL( 60, sys_getdents64, 3)
+#define __NR_fcntl64 61
+__SYSCALL( 61, sys_fcntl64, 3)
+#define __NR_fallocate 62
+__SYSCALL( 62, sys_fallocate, 6)
+#define __NR_fadvise64_64 63
+__SYSCALL( 63, xtensa_fadvise64_64, 6)
+#define __NR_utime 64 /* glibc 2.3.3 ?? */
+__SYSCALL( 64, sys_utime, 2)
+#define __NR_utimes 65
+__SYSCALL( 65, sys_utimes, 2)
+#define __NR_ioctl 66
+__SYSCALL( 66, sys_ioctl, 3)
+#define __NR_fcntl 67
+__SYSCALL( 67, sys_fcntl, 3)
+
+#define __NR_setxattr 68
+__SYSCALL( 68, sys_setxattr, 5)
+#define __NR_getxattr 69
+__SYSCALL( 69, sys_getxattr, 4)
+#define __NR_listxattr 70
+__SYSCALL( 70, sys_listxattr, 3)
+#define __NR_removexattr 71
+__SYSCALL( 71, sys_removexattr, 2)
+#define __NR_lsetxattr 72
+__SYSCALL( 72, sys_lsetxattr, 5)
+#define __NR_lgetxattr 73
+__SYSCALL( 73, sys_lgetxattr, 4)
+#define __NR_llistxattr 74
+__SYSCALL( 74, sys_llistxattr, 3)
+#define __NR_lremovexattr 75
+__SYSCALL( 75, sys_lremovexattr, 2)
+#define __NR_fsetxattr 76
+__SYSCALL( 76, sys_fsetxattr, 5)
+#define __NR_fgetxattr 77
+__SYSCALL( 77, sys_fgetxattr, 4)
+#define __NR_flistxattr 78
+__SYSCALL( 78, sys_flistxattr, 3)
+#define __NR_fremovexattr 79
+__SYSCALL( 79, sys_fremovexattr, 2)
+
+/* File Map / Shared Memory Operations */
+
+#define __NR_mmap2 80
+__SYSCALL( 80, sys_mmap_pgoff, 6)
+#define __NR_munmap 81
+__SYSCALL( 81, sys_munmap, 2)
+#define __NR_mprotect 82
+__SYSCALL( 82, sys_mprotect, 3)
+#define __NR_brk 83
+__SYSCALL( 83, sys_brk, 1)
+#define __NR_mlock 84
+__SYSCALL( 84, sys_mlock, 2)
+#define __NR_munlock 85
+__SYSCALL( 85, sys_munlock, 2)
+#define __NR_mlockall 86
+__SYSCALL( 86, sys_mlockall, 1)
+#define __NR_munlockall 87
+__SYSCALL( 87, sys_munlockall, 0)
+#define __NR_mremap 88
+__SYSCALL( 88, sys_mremap, 4)
+#define __NR_msync 89
+__SYSCALL( 89, sys_msync, 3)
+#define __NR_mincore 90
+__SYSCALL( 90, sys_mincore, 3)
+#define __NR_madvise 91
+__SYSCALL( 91, sys_madvise, 3)
+#define __NR_shmget 92
+__SYSCALL( 92, sys_shmget, 4)
+#define __NR_shmat 93
+__SYSCALL( 93, xtensa_shmat, 4)
+#define __NR_shmctl 94
+__SYSCALL( 94, sys_shmctl, 4)
+#define __NR_shmdt 95
+__SYSCALL( 95, sys_shmdt, 4)
+
+/* Socket Operations */
+
+#define __NR_socket 96
+__SYSCALL( 96, sys_socket, 3)
+#define __NR_setsockopt 97
+__SYSCALL( 97, sys_setsockopt, 5)
+#define __NR_getsockopt 98
+__SYSCALL( 98, sys_getsockopt, 5)
+#define __NR_shutdown 99
+__SYSCALL( 99, sys_shutdown, 2)
+
+#define __NR_bind 100
+__SYSCALL(100, sys_bind, 3)
+#define __NR_connect 101
+__SYSCALL(101, sys_connect, 3)
+#define __NR_listen 102
+__SYSCALL(102, sys_listen, 2)
+#define __NR_accept 103
+__SYSCALL(103, sys_accept, 3)
+
+#define __NR_getsockname 104
+__SYSCALL(104, sys_getsockname, 3)
+#define __NR_getpeername 105
+__SYSCALL(105, sys_getpeername, 3)
+#define __NR_sendmsg 106
+__SYSCALL(106, sys_sendmsg, 3)
+#define __NR_recvmsg 107
+__SYSCALL(107, sys_recvmsg, 3)
+#define __NR_send 108
+__SYSCALL(108, sys_send, 4)
+#define __NR_recv 109
+__SYSCALL(109, sys_recv, 4)
+#define __NR_sendto 110
+__SYSCALL(110, sys_sendto, 6)
+#define __NR_recvfrom 111
+__SYSCALL(111, sys_recvfrom, 6)
+
+#define __NR_socketpair 112
+__SYSCALL(112, sys_socketpair, 4)
+#define __NR_sendfile 113
+__SYSCALL(113, sys_sendfile, 4)
+#define __NR_sendfile64 114
+__SYSCALL(114, sys_sendfile64, 4)
+#define __NR_sendmmsg 115
+__SYSCALL(115, sys_sendmmsg, 4)
+
+/* Process Operations */
+
+#define __NR_clone 116
+__SYSCALL(116, sys_clone, 5)
+#define __NR_execve 117
+__SYSCALL(117, sys_execve, 3)
+#define __NR_exit 118
+__SYSCALL(118, sys_exit, 1)
+#define __NR_exit_group 119
+__SYSCALL(119, sys_exit_group, 1)
+#define __NR_getpid 120
+__SYSCALL(120, sys_getpid, 0)
+#define __NR_wait4 121
+__SYSCALL(121, sys_wait4, 4)
+#define __NR_waitid 122
+__SYSCALL(122, sys_waitid, 5)
+#define __NR_kill 123
+__SYSCALL(123, sys_kill, 2)
+#define __NR_tkill 124
+__SYSCALL(124, sys_tkill, 2)
+#define __NR_tgkill 125
+__SYSCALL(125, sys_tgkill, 3)
+#define __NR_set_tid_address 126
+__SYSCALL(126, sys_set_tid_address, 1)
+#define __NR_gettid 127
+__SYSCALL(127, sys_gettid, 0)
+#define __NR_setsid 128
+__SYSCALL(128, sys_setsid, 0)
+#define __NR_getsid 129
+__SYSCALL(129, sys_getsid, 1)
+#define __NR_prctl 130
+__SYSCALL(130, sys_prctl, 5)
+#define __NR_personality 131
+__SYSCALL(131, sys_personality, 1)
+#define __NR_getpriority 132
+__SYSCALL(132, sys_getpriority, 2)
+#define __NR_setpriority 133
+__SYSCALL(133, sys_setpriority, 3)
+#define __NR_setitimer 134
+__SYSCALL(134, sys_setitimer, 3)
+#define __NR_getitimer 135
+__SYSCALL(135, sys_getitimer, 2)
+#define __NR_setuid 136
+__SYSCALL(136, sys_setuid, 1)
+#define __NR_getuid 137
+__SYSCALL(137, sys_getuid, 0)
+#define __NR_setgid 138
+__SYSCALL(138, sys_setgid, 1)
+#define __NR_getgid 139
+__SYSCALL(139, sys_getgid, 0)
+#define __NR_geteuid 140
+__SYSCALL(140, sys_geteuid, 0)
+#define __NR_getegid 141
+__SYSCALL(141, sys_getegid, 0)
+#define __NR_setreuid 142
+__SYSCALL(142, sys_setreuid, 2)
+#define __NR_setregid 143
+__SYSCALL(143, sys_setregid, 2)
+#define __NR_setresuid 144
+__SYSCALL(144, sys_setresuid, 3)
+#define __NR_getresuid 145
+__SYSCALL(145, sys_getresuid, 3)
+#define __NR_setresgid 146
+__SYSCALL(146, sys_setresgid, 3)
+#define __NR_getresgid 147
+__SYSCALL(147, sys_getresgid, 3)
+#define __NR_setpgid 148
+__SYSCALL(148, sys_setpgid, 2)
+#define __NR_getpgid 149
+__SYSCALL(149, sys_getpgid, 1)
+#define __NR_getppid 150
+__SYSCALL(150, sys_getppid, 0)
+#define __NR_getpgrp 151
+__SYSCALL(151, sys_getpgrp, 0)
+
+#define __NR_reserved152 152 /* set_thread_area */
+__SYSCALL(152, sys_ni_syscall, 0)
+#define __NR_reserved153 153 /* get_thread_area */
+__SYSCALL(153, sys_ni_syscall, 0)
+#define __NR_times 154
+__SYSCALL(154, sys_times, 1)
+#define __NR_acct 155
+__SYSCALL(155, sys_acct, 1)
+#define __NR_sched_setaffinity 156
+__SYSCALL(156, sys_sched_setaffinity, 3)
+#define __NR_sched_getaffinity 157
+__SYSCALL(157, sys_sched_getaffinity, 3)
+#define __NR_capget 158
+__SYSCALL(158, sys_capget, 2)
+#define __NR_capset 159
+__SYSCALL(159, sys_capset, 2)
+#define __NR_ptrace 160
+__SYSCALL(160, sys_ptrace, 4)
+#define __NR_semtimedop 161
+__SYSCALL(161, sys_semtimedop, 5)
+#define __NR_semget 162
+__SYSCALL(162, sys_semget, 4)
+#define __NR_semop 163
+__SYSCALL(163, sys_semop, 4)
+#define __NR_semctl 164
+__SYSCALL(164, sys_semctl, 4)
+#define __NR_available165 165
+__SYSCALL(165, sys_ni_syscall, 0)
+#define __NR_msgget 166
+__SYSCALL(166, sys_msgget, 4)
+#define __NR_msgsnd 167
+__SYSCALL(167, sys_msgsnd, 4)
+#define __NR_msgrcv 168
+__SYSCALL(168, sys_msgrcv, 4)
+#define __NR_msgctl 169
+__SYSCALL(169, sys_msgctl, 4)
+#define __NR_available170 170
+__SYSCALL(170, sys_ni_syscall, 0)
+
+/* File System */
+
+#define __NR_umount2 171
+__SYSCALL(171, sys_umount, 2)
+#define __NR_mount 172
+__SYSCALL(172, sys_mount, 5)
+#define __NR_swapon 173
+__SYSCALL(173, sys_swapon, 2)
+#define __NR_chroot 174
+__SYSCALL(174, sys_chroot, 1)
+#define __NR_pivot_root 175
+__SYSCALL(175, sys_pivot_root, 2)
+#define __NR_umount 176
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __NR_swapoff 177
+__SYSCALL(177, sys_swapoff, 1)
+#define __NR_sync 178
+__SYSCALL(178, sys_sync, 0)
+#define __NR_syncfs 179
+__SYSCALL(179, sys_syncfs, 1)
+#define __NR_setfsuid 180
+__SYSCALL(180, sys_setfsuid, 1)
+#define __NR_setfsgid 181
+__SYSCALL(181, sys_setfsgid, 1)
+#define __NR_sysfs 182
+__SYSCALL(182, sys_sysfs, 3)
+#define __NR_ustat 183
+__SYSCALL(183, sys_ustat, 2)
+#define __NR_statfs 184
+__SYSCALL(184, sys_statfs, 2)
+#define __NR_fstatfs 185
+__SYSCALL(185, sys_fstatfs, 2)
+#define __NR_statfs64 186
+__SYSCALL(186, sys_statfs64, 3)
+#define __NR_fstatfs64 187
+__SYSCALL(187, sys_fstatfs64, 3)
+
+/* System */
+
+#define __NR_setrlimit 188
+__SYSCALL(188, sys_setrlimit, 2)
+#define __NR_getrlimit 189
+__SYSCALL(189, sys_getrlimit, 2)
+#define __NR_getrusage 190
+__SYSCALL(190, sys_getrusage, 2)
+#define __NR_futex 191
+__SYSCALL(191, sys_futex, 5)
+#define __NR_gettimeofday 192
+__SYSCALL(192, sys_gettimeofday, 2)
+#define __NR_settimeofday 193
+__SYSCALL(193, sys_settimeofday, 2)
+#define __NR_adjtimex 194
+__SYSCALL(194, sys_adjtimex, 1)
+#define __NR_nanosleep 195
+__SYSCALL(195, sys_nanosleep, 2)
+#define __NR_getgroups 196
+__SYSCALL(196, sys_getgroups, 2)
+#define __NR_setgroups 197
+__SYSCALL(197, sys_setgroups, 2)
+#define __NR_sethostname 198
+__SYSCALL(198, sys_sethostname, 2)
+#define __NR_setdomainname 199
+__SYSCALL(199, sys_setdomainname, 2)
+#define __NR_syslog 200
+__SYSCALL(200, sys_syslog, 3)
+#define __NR_vhangup 201
+__SYSCALL(201, sys_vhangup, 0)
+#define __NR_uselib 202
+__SYSCALL(202, sys_uselib, 1)
+#define __NR_reboot 203
+__SYSCALL(203, sys_reboot, 3)
+#define __NR_quotactl 204
+__SYSCALL(204, sys_quotactl, 4)
+#define __NR_nfsservctl 205
+__SYSCALL(205, sys_ni_syscall, 0) /* old nfsservctl */
+#define __NR__sysctl 206
+__SYSCALL(206, sys_sysctl, 1)
+#define __NR_bdflush 207
+__SYSCALL(207, sys_bdflush, 2)
+#define __NR_uname 208
+__SYSCALL(208, sys_newuname, 1)
+#define __NR_sysinfo 209
+__SYSCALL(209, sys_sysinfo, 1)
+#define __NR_init_module 210
+__SYSCALL(210, sys_init_module, 2)
+#define __NR_delete_module 211
+__SYSCALL(211, sys_delete_module, 1)
+
+#define __NR_sched_setparam 212
+__SYSCALL(212, sys_sched_setparam, 2)
+#define __NR_sched_getparam 213
+__SYSCALL(213, sys_sched_getparam, 2)
+#define __NR_sched_setscheduler 214
+__SYSCALL(214, sys_sched_setscheduler, 3)
+#define __NR_sched_getscheduler 215
+__SYSCALL(215, sys_sched_getscheduler, 1)
+#define __NR_sched_get_priority_max 216
+__SYSCALL(216, sys_sched_get_priority_max, 1)
+#define __NR_sched_get_priority_min 217
+__SYSCALL(217, sys_sched_get_priority_min, 1)
+#define __NR_sched_rr_get_interval 218
+__SYSCALL(218, sys_sched_rr_get_interval, 2)
+#define __NR_sched_yield 219
+__SYSCALL(219, sys_sched_yield, 0)
+#define __NR_available222 222
+__SYSCALL(222, sys_ni_syscall, 0)
+
+/* Signal Handling */
+
+#define __NR_restart_syscall 223
+__SYSCALL(223, sys_restart_syscall, 0)
+#define __NR_sigaltstack 224
+__SYSCALL(224, sys_sigaltstack, 2)
+#define __NR_rt_sigreturn 225
+__SYSCALL(225, xtensa_rt_sigreturn, 1)
+#define __NR_rt_sigaction 226
+__SYSCALL(226, sys_rt_sigaction, 4)
+#define __NR_rt_sigprocmask 227
+__SYSCALL(227, sys_rt_sigprocmask, 4)
+#define __NR_rt_sigpending 228
+__SYSCALL(228, sys_rt_sigpending, 2)
+#define __NR_rt_sigtimedwait 229
+__SYSCALL(229, sys_rt_sigtimedwait, 4)
+#define __NR_rt_sigqueueinfo 230
+__SYSCALL(230, sys_rt_sigqueueinfo, 3)
+#define __NR_rt_sigsuspend 231
+__SYSCALL(231, sys_rt_sigsuspend, 2)
+
+/* Message */
+
+#define __NR_mq_open 232
+__SYSCALL(232, sys_mq_open, 4)
+#define __NR_mq_unlink 233
+__SYSCALL(233, sys_mq_unlink, 1)
+#define __NR_mq_timedsend 234
+__SYSCALL(234, sys_mq_timedsend, 5)
+#define __NR_mq_timedreceive 235
+__SYSCALL(235, sys_mq_timedreceive, 5)
+#define __NR_mq_notify 236
+__SYSCALL(236, sys_mq_notify, 2)
+#define __NR_mq_getsetattr 237
+__SYSCALL(237, sys_mq_getsetattr, 3)
+#define __NR_available238 238
+__SYSCALL(238, sys_ni_syscall, 0)
+
+/* IO */
+
+#define __NR_io_setup 239
+__SYSCALL(239, sys_io_setup, 2)
+#define __NR_io_destroy 240
+__SYSCALL(240, sys_io_destroy, 1)
+#define __NR_io_submit 241
+__SYSCALL(241, sys_io_submit, 3)
+#define __NR_io_getevents 242
+__SYSCALL(242, sys_io_getevents, 5)
+#define __NR_io_cancel 243
+__SYSCALL(243, sys_io_cancel, 3)
+#define __NR_clock_settime 244
+__SYSCALL(244, sys_clock_settime, 2)
+#define __NR_clock_gettime 245
+__SYSCALL(245, sys_clock_gettime, 2)
+#define __NR_clock_getres 246
+__SYSCALL(246, sys_clock_getres, 2)
+#define __NR_clock_nanosleep 247
+__SYSCALL(247, sys_clock_nanosleep, 4)
+
+/* Timer */
+
+#define __NR_timer_create 248
+__SYSCALL(248, sys_timer_create, 3)
+#define __NR_timer_delete 249
+__SYSCALL(249, sys_timer_delete, 1)
+#define __NR_timer_settime 250
+__SYSCALL(250, sys_timer_settime, 4)
+#define __NR_timer_gettime 251
+__SYSCALL(251, sys_timer_gettime, 2)
+#define __NR_timer_getoverrun 252
+__SYSCALL(252, sys_timer_getoverrun, 1)
+
+/* System */
+
+#define __NR_reserved253 253
+__SYSCALL(253, sys_ni_syscall, 0)
+#define __NR_lookup_dcookie 254
+__SYSCALL(254, sys_lookup_dcookie, 4)
+#define __NR_available255 255
+__SYSCALL(255, sys_ni_syscall, 0)
+#define __NR_add_key 256
+__SYSCALL(256, sys_add_key, 5)
+#define __NR_request_key 257
+__SYSCALL(257, sys_request_key, 5)
+#define __NR_keyctl 258
+__SYSCALL(258, sys_keyctl, 5)
+#define __NR_available259 259
+__SYSCALL(259, sys_ni_syscall, 0)
+
+
+#define __NR_readahead 260
+__SYSCALL(260, sys_readahead, 5)
+#define __NR_remap_file_pages 261
+__SYSCALL(261, sys_remap_file_pages, 5)
+#define __NR_migrate_pages 262
+__SYSCALL(262, sys_migrate_pages, 0)
+#define __NR_mbind 263
+__SYSCALL(263, sys_mbind, 6)
+#define __NR_get_mempolicy 264
+__SYSCALL(264, sys_get_mempolicy, 5)
+#define __NR_set_mempolicy 265
+__SYSCALL(265, sys_set_mempolicy, 3)
+#define __NR_unshare 266
+__SYSCALL(266, sys_unshare, 1)
+#define __NR_move_pages 267
+__SYSCALL(267, sys_move_pages, 0)
+#define __NR_splice 268
+__SYSCALL(268, sys_splice, 0)
+#define __NR_tee 269
+__SYSCALL(269, sys_tee, 0)
+#define __NR_vmsplice 270
+__SYSCALL(270, sys_vmsplice, 0)
+#define __NR_available271 271
+__SYSCALL(271, sys_ni_syscall, 0)
+
+#define __NR_pselect6 272
+__SYSCALL(272, sys_pselect6, 0)
+#define __NR_ppoll 273
+__SYSCALL(273, sys_ppoll, 0)
+#define __NR_epoll_pwait 274
+__SYSCALL(274, sys_epoll_pwait, 0)
+#define __NR_epoll_create1 275
+__SYSCALL(275, sys_epoll_create1, 1)
+
+#define __NR_inotify_init 276
+__SYSCALL(276, sys_inotify_init, 0)
+#define __NR_inotify_add_watch 277
+__SYSCALL(277, sys_inotify_add_watch, 3)
+#define __NR_inotify_rm_watch 278
+__SYSCALL(278, sys_inotify_rm_watch, 2)
+#define __NR_inotify_init1 279
+__SYSCALL(279, sys_inotify_init1, 1)
+
+#define __NR_getcpu 280
+__SYSCALL(280, sys_getcpu, 0)
+#define __NR_kexec_load 281
+__SYSCALL(281, sys_ni_syscall, 0)
+
+#define __NR_ioprio_set 282
+__SYSCALL(282, sys_ioprio_set, 2)
+#define __NR_ioprio_get 283
+__SYSCALL(283, sys_ioprio_get, 3)
+
+#define __NR_set_robust_list 284
+__SYSCALL(284, sys_set_robust_list, 3)
+#define __NR_get_robust_list 285
+__SYSCALL(285, sys_get_robust_list, 3)
+#define __NR_available286 286
+__SYSCALL(286, sys_ni_syscall, 0)
+#define __NR_available287 287
+__SYSCALL(287, sys_ni_syscall, 0)
+
+/* Relative File Operations */
+
+#define __NR_openat 288
+__SYSCALL(288, sys_openat, 4)
+#define __NR_mkdirat 289
+__SYSCALL(289, sys_mkdirat, 3)
+#define __NR_mknodat 290
+__SYSCALL(290, sys_mknodat, 4)
+#define __NR_unlinkat 291
+__SYSCALL(291, sys_unlinkat, 3)
+#define __NR_renameat 292
+__SYSCALL(292, sys_renameat, 4)
+#define __NR_linkat 293
+__SYSCALL(293, sys_linkat, 5)
+#define __NR_symlinkat 294
+__SYSCALL(294, sys_symlinkat, 3)
+#define __NR_readlinkat 295
+__SYSCALL(295, sys_readlinkat, 4)
+#define __NR_utimensat 296
+__SYSCALL(296, sys_utimensat, 0)
+#define __NR_fchownat 297
+__SYSCALL(297, sys_fchownat, 5)
+#define __NR_futimesat 298
+__SYSCALL(298, sys_futimesat, 4)
+#define __NR_fstatat64 299
+__SYSCALL(299, sys_fstatat64, 0)
+#define __NR_fchmodat 300
+__SYSCALL(300, sys_fchmodat, 4)
+#define __NR_faccessat 301
+__SYSCALL(301, sys_faccessat, 4)
+#define __NR_available302 302
+__SYSCALL(302, sys_ni_syscall, 0)
+#define __NR_available303 303
+__SYSCALL(303, sys_ni_syscall, 0)
+
+#define __NR_signalfd 304
+__SYSCALL(304, sys_signalfd, 3)
+/* 305 was __NR_timerfd */
+__SYSCALL(305, sys_ni_syscall, 0)
+#define __NR_eventfd 306
+__SYSCALL(306, sys_eventfd, 1)
+#define __NR_recvmmsg 307
+__SYSCALL(307, sys_recvmmsg, 5)
+
+#define __NR_setns 308
+__SYSCALL(308, sys_setns, 2)
+#define __NR_signalfd4 309
+__SYSCALL(309, sys_signalfd4, 4)
+#define __NR_dup3 310
+__SYSCALL(310, sys_dup3, 3)
+#define __NR_pipe2 311
+__SYSCALL(311, sys_pipe2, 2)
+
+#define __NR_timerfd_create 312
+__SYSCALL(312, sys_timerfd_create, 2)
+#define __NR_timerfd_settime 313
+__SYSCALL(313, sys_timerfd_settime, 4)
+#define __NR_timerfd_gettime 314
+__SYSCALL(314, sys_timerfd_gettime, 2)
+#define __NR_available315 315
+__SYSCALL(315, sys_ni_syscall, 0)
+
+#define __NR_eventfd2 316
+__SYSCALL(316, sys_eventfd2, 2)
+#define __NR_preadv 317
+__SYSCALL(317, sys_preadv, 5)
+#define __NR_pwritev 318
+__SYSCALL(318, sys_pwritev, 5)
+#define __NR_available319 319
+__SYSCALL(319, sys_ni_syscall, 0)
+
+#define __NR_fanotify_init 320
+__SYSCALL(320, sys_fanotify_init, 2)
+#define __NR_fanotify_mark 321
+__SYSCALL(321, sys_fanotify_mark, 6)
+#define __NR_process_vm_readv 322
+__SYSCALL(322, sys_process_vm_readv, 6)
+#define __NR_process_vm_writev 323
+__SYSCALL(323, sys_process_vm_writev, 6)
+
+#define __NR_name_to_handle_at 324
+__SYSCALL(324, sys_name_to_handle_at, 5)
+#define __NR_open_by_handle_at 325
+__SYSCALL(325, sys_open_by_handle_at, 3)
+#define __NR_sync_file_range2 326
+__SYSCALL(326, sys_sync_file_range2, 6)
+#define __NR_perf_event_open 327
+__SYSCALL(327, sys_perf_event_open, 5)
+
+#define __NR_rt_tgsigqueueinfo 328
+__SYSCALL(328, sys_rt_tgsigqueueinfo, 4)
+#define __NR_clock_adjtime 329
+__SYSCALL(329, sys_clock_adjtime, 2)
+#define __NR_prlimit64 330
+__SYSCALL(330, sys_prlimit64, 4)
+#define __NR_kcmp 331
+__SYSCALL(331, sys_kcmp, 5)
+
+#define __NR_finit_module 332
+__SYSCALL(332, sys_finit_module, 3)
+
+#define __NR_accept4 333
+__SYSCALL(333, sys_accept4, 4)
+
+#define __NR_sched_setattr 334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr 335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_renameat2 336
+__SYSCALL(336, sys_renameat2, 5)
+
+#define __NR_seccomp 337
+__SYSCALL(337, sys_seccomp, 3)
+#define __NR_getrandom 338
+__SYSCALL(338, sys_getrandom, 3)
+#define __NR_memfd_create 339
+__SYSCALL(339, sys_memfd_create, 2)
+#define __NR_bpf 340
+__SYSCALL(340, sys_bpf, 3)
+#define __NR_execveat 341
+__SYSCALL(341, sys_execveat, 5)
+
+#define __NR_userfaultfd 342
+__SYSCALL(342, sys_userfaultfd, 1)
+#define __NR_membarrier 343
+__SYSCALL(343, sys_membarrier, 2)
+#define __NR_mlock2 344
+__SYSCALL(344, sys_mlock2, 3)
+#define __NR_copy_file_range 345
+__SYSCALL(345, sys_copy_file_range, 6)
+#define __NR_preadv2 346
+__SYSCALL(346, sys_preadv2, 6)
+#define __NR_pwritev2 347
+__SYSCALL(347, sys_pwritev2, 6)
+
+#define __NR_pkey_mprotect 348
+__SYSCALL(348, sys_pkey_mprotect, 4)
+#define __NR_pkey_alloc 349
+__SYSCALL(349, sys_pkey_alloc, 2)
+#define __NR_pkey_free 350
+__SYSCALL(350, sys_pkey_free, 1)
+
+#define __NR_statx 351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count 352
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ */
+
+#define SYS_XTENSA_RESERVED 0 /* don't use this */
+#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
+#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
+#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
+#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
+
+#define SYS_XTENSA_COUNT 5 /* count */
+
+#undef __SYSCALL
+
+#endif /* _UAPI_XTENSA_UNISTD_H */
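The __SYSCALL() entries above are an X-macro table: the header never defines __SYSCALL itself (and #undefs it at the end), so each consumer defines the macro to extract whichever field it needs before including the file. A minimal sketch of the consuming side, assuming a table builder along the lines of arch/xtensa/kernel/syscall.c (the typedef and array shown here are illustrative, not verbatim kernel source):

	typedef long (*syscall_t)(void);

	/* Each __SYSCALL(nr, entry, nargs) line expands to one table slot;
	 * slots not covered by the header fall back to sys_ni_syscall. */
	#define __SYSCALL(nr, entry, nargs) [nr] = (syscall_t)entry,

	syscall_t sys_call_table[__NR_syscall_count] = {
		[0 ... __NR_syscall_count - 1] = (syscall_t)sys_ni_syscall,
	#include <uapi/asm/unistd.h>
	};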
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
new file mode 100644
index 000000000..c5f676c3c
--- /dev/null
+++ b/arch/xtensa/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 000000000..91907590d
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/Xtensa kernel.
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+ ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+ vectors.o
+
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
+
+# In the Xtensa architecture, the assembler generates literal pools that must
+# precede the L32R instructions referencing them, within a relative offset of
+# less than 256 kB. Therefore, the .text and .literal sections must be
+# combined in parentheses in the linker script, such as: *(.literal .text).
+#
+# We need to post-process the generated vmlinux.lds scripts to convert
+# *(xxx.text) to *(xxx.literal xxx.text) for the following text sections:
+# .text .ref.text .*init.text .*exit.text .text.*
+#
+# Replicate rules in scripts/Makefile.build
+
+sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \
+ -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \
+ -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
+ -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \
+ -e 's/\.{text}/.text/g'
+
+quiet_cmd__cpp_lds_S = LDS $@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+ | sed $(sed-y) >$@
+
+$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
+ $(call if_changed_dep,_cpp_lds_S)
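To make the sed-y rules above concrete, here are two representative rewrites they perform on the generated linker script (the .{text} placeholder keeps the looping rules from re-matching text they have already rewritten; the final expression turns it back into .text):

	*(.text)       becomes  *(.literal .text)
	*(.init.text)  becomes  *(.init.literal .init.text)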
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 000000000..9301452e5
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,485 @@
+/*
+ * arch/xtensa/kernel/align.S
+ *
+ * Handle unalignment exceptions in kernel space.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica, Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ *
+ * Rewritten by Chris Zankel <chris@zankel.net>
+ *
+ * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
+ */
+
+#include <linux/linkage.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+
+/* First-level exception handler for unaligned exceptions.
+ *
+ * Note: This handler works only for kernel exceptions. Unaligned user
+ * access should get a seg fault.
+ */
+
+/* Big and little endian 16-bit values are located in
+ * different halves of a register. HWORD_START helps to
+ * abstract the notion of extracting a 16-bit value from a
+ * register.
+ * We also have to define new shifting instructions because
+ * lsb and msb are on 'opposite' ends in a register for
+ * different endian machines.
+ *
+ * Assume a memory region in ascending address:
+ * 0 1 2 3|4 5 6 7
+ *
+ * When loading one word into a register, the content of that register is:
+ * LE 3 2 1 0, 7 6 5 4
+ * BE 0 1 2 3, 4 5 6 7
+ *
+ * Masking the bits of the higher/lower address means:
+ * LE X X 0 0, 0 0 X X
+ * BE 0 0 X X, X X 0 0
+ *
+ * Shifting to higher/lower addresses, means:
+ * LE shift left / shift right
+ * BE shift right / shift left
+ *
+ * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
+ * LE mask 0 0 X X / shift left
+ * BE shift left / mask 0 0 X X
+ */
+
+#define UNALIGNED_USER_EXCEPTION
+
+#if XCHAL_HAVE_BE
+
+#define HWORD_START 16
+#define INSN_OP0 28
+#define INSN_T 24
+#define INSN_OP1 16
+
+.macro __ssa8r r; ssa8l \r; .endm
+.macro __sh r, s; srl \r, \s; .endm
+.macro __sl r, s; sll \r, \s; .endm
+.macro __exth r, s; extui \r, \s, 0, 16; .endm
+.macro __extl r, s; slli \r, \s, 16; .endm
+
+#else
+
+#define HWORD_START 0
+#define INSN_OP0 0
+#define INSN_T 4
+#define INSN_OP1 12
+
+.macro __ssa8r r; ssa8b \r; .endm
+.macro __sh r, s; sll \r, \s; .endm
+.macro __sl r, s; srl \r, \s; .endm
+.macro __exth r, s; slli \r, \s, 16; .endm
+.macro __extl r, s; extui \r, \s, 0, 16; .endm
+
+#endif
+
+/*
+ * xxxx xxxx = imm8 field
+ * yyyy = imm4 field
+ * ssss = s field
+ * tttt = t field
+ *
+ * 16 0
+ * -------------------
+ * L32I.N yyyy ssss tttt 1000
+ * S32I.N yyyy ssss tttt 1001
+ *
+ * 23 0
+ * -----------------------------
+ * res 0000 0010
+ * L16UI xxxx xxxx 0001 ssss tttt 0010
+ * L32I xxxx xxxx 0010 ssss tttt 0010
+ * XXX 0011 ssss tttt 0010
+ * XXX 0100 ssss tttt 0010
+ * S16I xxxx xxxx 0101 ssss tttt 0010
+ * S32I xxxx xxxx 0110 ssss tttt 0010
+ * XXX 0111 ssss tttt 0010
+ * XXX 1000 ssss tttt 0010
+ * L16SI xxxx xxxx 1001 ssss tttt 0010
+ * XXX 1010 0010
+ * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
+ * XXX 1100 0010
+ * XXX 1101 0010
+ * XXX 1110 0010
+ * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
+ * -----------------------------
+ * ^ ^ ^
+ * sub-opcode (NIBBLE_R) -+ | |
+ * t field (NIBBLE_T) -----------+ |
+ * major opcode (NIBBLE_OP0) --------------+
+ */
+
+#define OP0_L32I_N 0x8 /* load immediate narrow */
+#define OP0_S32I_N 0x9 /* store immediate narrow */
+#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
+#define OP1_SI_BIT 2 /* OP1 bit number for stores */
+
+#define OP1_L32I 0x2
+#define OP1_L16UI 0x1
+#define OP1_L16SI 0x9
+#define OP1_L32AI 0xb
+
+#define OP1_S32I 0x6
+#define OP1_S16I 0x5
+#define OP1_S32RI 0xf
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ .literal_position
+ENTRY(fast_unaligned)
+
+	/* Note: We don't expect the address to be aligned on a word
+	 * boundary; the processor raises this exception only for unaligned
+	 * accesses, so an aligned address here would indicate a hardware
+	 * fault.
+	 */
+
+	/* Save some working registers */
+
+ s32i a4, a2, PT_AREG4
+ s32i a5, a2, PT_AREG5
+ s32i a6, a2, PT_AREG6
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+
+ rsr a0, depc
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+
+ rsr a3, excsave1
+ movi a4, fast_unaligned_fixup
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Keep value of SAR in a0 */
+
+ rsr a0, sar
+ rsr a8, excvaddr # load unaligned memory address
+
+ /* Now, identify one of the following load/store instructions.
+ *
+ * The only possible danger of a double exception on the
+ * following l32i instructions is kernel code in vmalloc
+ * memory. The processor was just executing at the EPC_1
+ * address, and indeed, already fetched the instruction. That
+ * guarantees a TLB mapping, which hasn't been replaced by
+ * this unaligned exception handler that uses only static TLB
+ * mappings. However, high-level interrupt handlers might
+ * modify TLB entries, so for the generic case, we register a
+ * TABLE_FIXUP handler here, too.
+ */
+
+ /* a3...a6 saved on stack, a2 = SP */
+
+ /* Extract the instruction that caused the unaligned access. */
+
+ rsr a7, epc1 # load exception address
+ movi a3, ~3
+ and a3, a3, a7 # mask lower bits
+
+ l32i a4, a3, 0 # load 2 words
+ l32i a5, a3, 4
+
+ __ssa8 a7
+ __src_b a4, a4, a5 # a4 has the instruction
+
+ /* Analyze the instruction (load or store?). */
+
+ extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
+
+#if XCHAL_HAVE_DENSITY
+ _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
+ addi a6, a5, -OP0_S32I_N
+ _beqz a6, .Lstore # S32I.N, do a store
+#endif
+ /* 'store indicator bit' not set, jump */
+ _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
+
+ /* Store: Jump to table entry to get the value in the source register.*/
+
+.Lstore:movi a5, .Lstore_table # table
+ extui a6, a4, INSN_T, 4 # get source register
+ addx8 a5, a6, a5
+ jx a5 # jump into table
+
+ /* Load: Load memory address. */
+
+.Lload: movi a3, ~3
+ and a3, a3, a8 # align memory address
+
+ __ssa8 a8
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a3, a3, 8
+ l32e a5, a3, -8
+ l32e a6, a3, -4
+#else
+ l32i a5, a3, 0
+ l32i a6, a3, 4
+#endif
+ __src_b a3, a5, a6 # a3 has the data word
+
+#if XCHAL_HAVE_DENSITY
+ addi a7, a7, 2 # increment PC (assume 16-bit insn)
+
+ extui a5, a4, INSN_OP0, 4
+ _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
+
+ addi a7, a7, 1
+#else
+ addi a7, a7, 3
+#endif
+
+ extui a5, a4, INSN_OP1, 4
+ _beqi a5, OP1_L32I, 1f # l32i: jump
+
+ extui a3, a3, 0, 16 # extract lower 16 bits
+ _beqi a5, OP1_L16UI, 1f
+ addi a5, a5, -OP1_L16SI
+ _bnez a5, .Linvalid_instruction_load
+
+ /* sign extend value */
+
+ slli a3, a3, 16
+ srai a3, a3, 16
+
+ /* Set target register. */
+
+1:
+ extui a4, a4, INSN_T, 4 # extract target register
+ movi a5, .Lload_table
+ addx8 a4, a4, a5
+ jx a4 # jump to entry for target register
+
+ .align 8
+.Lload_table:
+ s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
+ mov a1, a3; _j .Lexit; .align 8 # fishy??
+ s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
+ s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
+ mov a9, a3 ; _j .Lexit; .align 8
+ mov a10, a3 ; _j .Lexit; .align 8
+ mov a11, a3 ; _j .Lexit; .align 8
+ mov a12, a3 ; _j .Lexit; .align 8
+ mov a13, a3 ; _j .Lexit; .align 8
+ mov a14, a3 ; _j .Lexit; .align 8
+ mov a15, a3 ; _j .Lexit; .align 8
+
+.Lstore_table:
+ l32i a3, a2, PT_AREG0; _j 1f; .align 8
+ mov a3, a1; _j 1f; .align 8 # fishy??
+ l32i a3, a2, PT_AREG2; _j 1f; .align 8
+ l32i a3, a2, PT_AREG3; _j 1f; .align 8
+ l32i a3, a2, PT_AREG4; _j 1f; .align 8
+ l32i a3, a2, PT_AREG5; _j 1f; .align 8
+ l32i a3, a2, PT_AREG6; _j 1f; .align 8
+ l32i a3, a2, PT_AREG7; _j 1f; .align 8
+ l32i a3, a2, PT_AREG8; _j 1f; .align 8
+ mov a3, a9 ; _j 1f; .align 8
+ mov a3, a10 ; _j 1f; .align 8
+ mov a3, a11 ; _j 1f; .align 8
+ mov a3, a12 ; _j 1f; .align 8
+ mov a3, a13 ; _j 1f; .align 8
+ mov a3, a14 ; _j 1f; .align 8
+ mov a3, a15 ; _j 1f; .align 8
+
+ /* We cannot handle this exception. */
+
+ .extern _kernel_exception
+.Linvalid_instruction_load:
+.Linvalid_instruction_store:
+
+ movi a4, 0
+ rsr a3, excsave1
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+ /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ wsr a0, sar
+ mov a1, a2
+
+ rsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 2f # jump if user mode
+
+ movi a0, _kernel_exception
+ jx a0
+
+2: movi a0, _user_exception
+ jx a0
+
+1: # a7: instruction pointer, a4: instruction, a3: value
+
+ movi a6, 0 # mask: ffffffff:00000000
+
+#if XCHAL_HAVE_DENSITY
+	addi	a7, a7, 2	# incr. PC, assume 16-bit instruction
+
+ extui a5, a4, INSN_OP0, 4 # extract OP0
+ addi a5, a5, -OP0_S32I_N
+ _beqz a5, 1f # s32i.n: jump
+
+ addi a7, a7, 1 # increment PC, 32-bit instruction
+#else
+ addi a7, a7, 3 # increment PC, 32-bit instruction
+#endif
+
+ extui a5, a4, INSN_OP1, 4 # extract OP1
+ _beqi a5, OP1_S32I, 1f # jump if 32 bit store
+ _bnei a5, OP1_S16I, .Linvalid_instruction_store
+
+ movi a5, -1
+ __extl a3, a3 # get 16-bit value
+ __exth a6, a5 # get 16-bit mask ffffffff:ffff0000
+
+ /* Get memory address */
+
+1:
+ movi a4, ~3
+ and a4, a4, a8 # align memory address
+
+ /* Insert value into memory */
+
+ movi a5, -1 # mask: ffffffff:XXXX0000
+#ifdef UNALIGNED_USER_EXCEPTION
+ addi a4, a4, 8
+#endif
+
+ __ssa8r a8
+ __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
+ __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
+#ifdef UNALIGNED_USER_EXCEPTION
+ l32e a5, a4, -8
+#else
+ l32i a5, a4, 0 # load lower address word
+#endif
+ and a5, a5, a8 # mask
+ __sh a8, a3 # shift value
+ or a5, a5, a8 # or with original value
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a5, a4, -8
+ l32e a8, a4, -4
+#else
+ s32i a5, a4, 0 # store
+ l32i a8, a4, 4 # same for upper address word
+#endif
+ __sl a5, a3
+ and a6, a8, a6
+ or a6, a6, a5
+#ifdef UNALIGNED_USER_EXCEPTION
+ s32e a6, a4, -4
+#else
+ s32i a6, a4, 4
+#endif
+
+.Lexit:
+#if XCHAL_HAVE_LOOPS
+ rsr a4, lend # check if we reached LEND
+ bne a7, a4, 1f
+ rsr a4, lcount # and LCOUNT != 0
+ beqz a4, 1f
+ addi a4, a4, -1 # decrement LCOUNT and set
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a4, lcount
+#endif
+
+1: wsr a7, epc1 # skip emulated instruction
+
+ /* Update icount if we're single-stepping in userspace. */
+ rsr a4, icountlevel
+ beqz a4, 1f
+ bgeui a4, LOCKLEVEL + 1, 1f
+ rsr a4, icount
+ addi a4, a4, 1
+ wsr a4, icount
+1:
+ movi a4, 0
+ rsr a3, excsave1
+ s32i a4, a3, EXC_TABLE_FIXUP
+
+	/* Restore working registers */
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
+
+ /* restore SAR and return */
+
+ wsr a0, sar
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_AREG2
+ rfe
+
+ENDPROC(fast_unaligned)
+
+ENTRY(fast_unaligned_fixup)
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ wsr a3, excsave1
+
+ l32i a8, a2, PT_AREG8
+ l32i a7, a2, PT_AREG7
+ l32i a6, a2, PT_AREG6
+ l32i a5, a2, PT_AREG5
+ l32i a4, a2, PT_AREG4
+ l32i a0, a2, PT_AREG2
+ xsr a0, depc # restore depc and a0
+ wsr a0, sar
+
+ rsr a0, exccause
+ s32i a0, a2, PT_DEPC # mark as a regular exception
+
+ rsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 1f # jump if user mode
+
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler
+ l32i a3, a2, PT_AREG3
+ jx a0
+1:
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a2, PT_AREG3
+ jx a0
+
+ENDPROC(fast_unaligned_fixup)
+
+#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
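The load path above is the classic two-aligned-reads-plus-funnel-shift technique. A C model of the little-endian 32-bit case, for illustration only (the big-endian macros mirror the shift directions; this is a sketch, not kernel code):

	#include <stdint.h>

	static uint32_t emulated_load32_le(uintptr_t addr)
	{
		uintptr_t base = addr & ~(uintptr_t)3;       /* movi ~3; and   */
		unsigned shift = (addr & 3) * 8;             /* __ssa8 -> SAR  */
		uint32_t lo = *(const uint32_t *)base;       /* l32i a5, a3, 0 */
		uint32_t hi = *(const uint32_t *)(base + 4); /* l32i a6, a3, 4 */

		if (shift == 0)
			return lo;                           /* already aligned */
		return (lo >> shift) | (hi << (32 - shift)); /* __src_b        */
	}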
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 000000000..120dd746a
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,149 @@
+/*
+ * arch/xtensa/kernel/asm-offsets.c
+ *
+ * Generates definitions from C-type structures used by assembly sources.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+int main(void)
+{
+ /* struct pt_regs */
+ DEFINE(PT_PC, offsetof (struct pt_regs, pc));
+ DEFINE(PT_PS, offsetof (struct pt_regs, ps));
+ DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
+ DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
+ DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
+ DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
+ DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
+ DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
+ DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
+ DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
+ DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
+ DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
+ DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+ DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
+ DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
+ DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
+ DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
+ DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
+ DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
+ DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
+ DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
+ DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
+ DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
+ DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
+ DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
+ DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
+ DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
+ DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
+ DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
+ DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
+ DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
+ DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+ DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+ DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
+
+ /* struct task_struct */
+ DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
+ DEFINE(TASK_MM, offsetof (struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
+ DEFINE(TASK_PID, offsetof (struct task_struct, pid));
+ DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+ DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
+
+ /* offsets in thread_info struct */
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_STSTUS, thread_info, status);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+
+	/* struct thread_info (offset from task_struct) */
+ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
+ DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
+ DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
+#endif
+ DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+ DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
+ DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+ thread.current_ds));
+
+ /* struct mm_struct */
+ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
+ DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
+ DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
+
+ /* struct page */
+ DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
+
+ /* constants */
+ DEFINE(_CLONE_VM, CLONE_VM);
+ DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+ DEFINE(PG_ARCH_1, PG_arch_1);
+
+ /* struct debug_table */
+ DEFINE(DT_DEBUG_EXCEPTION,
+ offsetof(struct debug_table, debug_exception));
+ DEFINE(DT_DEBUG_SAVE, offsetof(struct debug_table, debug_save));
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ DEFINE(DT_DBREAKC_SAVE, offsetof(struct debug_table, dbreakc_save));
+ DEFINE(DT_ICOUNT_SAVE, offsetof(struct debug_table, icount_save));
+ DEFINE(DT_ICOUNT_LEVEL_SAVE,
+ offsetof(struct debug_table, icount_level_save));
+#endif
+
+ /* struct exc_table */
+ DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
+ DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
+ DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
+ DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+ DEFINE(EXC_TABLE_SYSCALL_SAVE,
+ offsetof(struct exc_table, syscall_save));
+ DEFINE(EXC_TABLE_FAST_USER,
+ offsetof(struct exc_table, fast_user_handler));
+ DEFINE(EXC_TABLE_FAST_KERNEL,
+ offsetof(struct exc_table, fast_kernel_handler));
+ DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+
+ return 0;
+}
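DEFINE() and OFFSET() here come from <linux/kbuild.h>. asm-offsets.c is compiled to assembly but never linked: each invocation embeds an .ascii marker carrying a compile-time constant, and a Kbuild sed pass turns those markers into #define lines in generated/asm-offsets.h, which the assembly sources then include. A condensed, self-contained sketch of the mechanism (simplified from the real kbuild.h macros):

	#include <stddef.h>

	/* The "i" constraint forces val to a compile-time immediate; the
	 * marker line "->SYM value expr" survives into the .s output. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
	#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

	struct demo_regs { unsigned long pc, ps; };

	int main(void)
	{
		OFFSET(DEMO_PS, demo_regs, ps); /* -> #define DEMO_PS 8 (LP64) */
		return 0;
	}

Compiling this with gcc -S and grepping the output for "->" shows the marker lines the offsets script consumes.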
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 000000000..4f8b52d57
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,330 @@
+/*
+ * arch/xtensa/kernel/coprocessor.S
+ *
+ * Xtensa processor configuration-specific table of coprocessor and
+ * other custom register layout information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+
+#if XTENSA_HAVE_COPROCESSORS
+
+/*
+ * Macros for lazy context switch.
+ */
+
+#define SAVE_CP_REGS(x) \
+ .align 4; \
+ .Lsave_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_store a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
+
+#define SAVE_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
+
+
+#define LOAD_CP_REGS(x) \
+ .align 4; \
+ .Lload_cp_regs_cp##x: \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ xchal_cp##x##_load a2 a4 a5 a6 a7; \
+ .endif; \
+ jx a0
+
+#define LOAD_CP_REGS_TAB(x) \
+ .if XTENSA_HAVE_COPROCESSOR(x); \
+ .long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
+ .else; \
+ .long 0; \
+ .endif; \
+ .long THREAD_XTREGS_CP##x
+
+ SAVE_CP_REGS(0)
+ SAVE_CP_REGS(1)
+ SAVE_CP_REGS(2)
+ SAVE_CP_REGS(3)
+ SAVE_CP_REGS(4)
+ SAVE_CP_REGS(5)
+ SAVE_CP_REGS(6)
+ SAVE_CP_REGS(7)
+
+ LOAD_CP_REGS(0)
+ LOAD_CP_REGS(1)
+ LOAD_CP_REGS(2)
+ LOAD_CP_REGS(3)
+ LOAD_CP_REGS(4)
+ LOAD_CP_REGS(5)
+ LOAD_CP_REGS(6)
+ LOAD_CP_REGS(7)
+
+ .align 4
+.Lsave_cp_regs_jump_table:
+ SAVE_CP_REGS_TAB(0)
+ SAVE_CP_REGS_TAB(1)
+ SAVE_CP_REGS_TAB(2)
+ SAVE_CP_REGS_TAB(3)
+ SAVE_CP_REGS_TAB(4)
+ SAVE_CP_REGS_TAB(5)
+ SAVE_CP_REGS_TAB(6)
+ SAVE_CP_REGS_TAB(7)
+
+.Lload_cp_regs_jump_table:
+ LOAD_CP_REGS_TAB(0)
+ LOAD_CP_REGS_TAB(1)
+ LOAD_CP_REGS_TAB(2)
+ LOAD_CP_REGS_TAB(3)
+ LOAD_CP_REGS_TAB(4)
+ LOAD_CP_REGS_TAB(5)
+ LOAD_CP_REGS_TAB(6)
+ LOAD_CP_REGS_TAB(7)
+
+/*
+ * coprocessor_save(buffer, index)
+ * a2 a3
+ * coprocessor_load(buffer, index)
+ * a2 a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_save)
+
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
+ retw
+
+ENDPROC(coprocessor_save)
+
+ENTRY(coprocessor_load)
+
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0	# table entries are 8 bytes
+ l32i a3, a3, 0
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
+ retw
+
+ENDPROC(coprocessor_load)
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                               a2         a3
+ * coprocessor_restore(struct thread_info*, index)
+ *                                a2          a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the thread_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+
+ENTRY(coprocessor_flush)
+
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
+ retw
+
+ENDPROC(coprocessor_flush)
+
+ENTRY(coprocessor_restore)
+ entry a1, 32
+ s32i a0, a1, 0
+ movi a0, .Lload_cp_regs_jump_table
+	addx8	a3, a3, a0	# table entries are 8 bytes
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ add a0, a0, a3
+ callx0 a0
+1: l32i a0, a1, 0
+ retw
+
+ENDPROC(coprocessor_restore)
+
+/*
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor_double)
+
+ wsr a0, excsave1
+ call0 unrecoverable_exception
+
+ENDPROC(fast_coprocessor_double)
+
+ENTRY(fast_coprocessor)
+
+ /* Save remaining registers a1-a3 and SAR */
+
+ s32i a3, a2, PT_AREG3
+ rsr a3, sar
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_SAR
+ mov a1, a2
+ rsr a2, depc
+ s32i a2, a1, PT_AREG2
+
+ /*
+ * The hal macros require up to 4 temporary registers. We use a3..a6.
+ */
+
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+
+ /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+ rsr a3, exccause
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+	/* Set corresponding CPENABLE bit -> (sar:cp-index, a2: 1<<cp-index)*/
+
+ ssl a3 # SAR: 32 - coprocessor_number
+ movi a2, 1
+ rsr a0, cpenable
+ sll a2, a2
+ or a0, a0, a2
+ wsr a0, cpenable
+ rsync
+
+ /* Retrieve previous owner. (a3 still holds CP number) */
+
+ movi a0, coprocessor_owner # list of owners
+ addx4 a0, a3, a0 # entry for CP
+ l32i a4, a0, 0
+
+ beqz a4, 1f # skip 'save' if no previous owner
+
+ /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+ l32i a5, a4, THREAD_CPENABLE
+ xor a5, a5, a2 # (1 << cp-id) still in a2
+ s32i a5, a4, THREAD_CPENABLE
+
+ /*
+ * Get context save area and 'call' save routine.
+ * (a4 still holds previous owner (thread_info), a3 CP number)
+ */
+
+ movi a5, .Lsave_cp_regs_jump_table
+ movi a0, 2f # a0: 'return' address
+ addx8 a3, a3, a5 # a3: coprocessor number
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5 # a4: address of save routine
+ jx a4
+
+ /* Note that only a0 and a1 were preserved. */
+
+2: rsr a3, exccause
+ addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+ movi a0, coprocessor_owner
+ addx4 a0, a3, a0
+
+ /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1: GET_THREAD_INFO (a4, a1)
+ s32i a4, a0, 0
+
+ /* Get context save area and 'call' load routine. */
+
+ movi a5, .Lload_cp_regs_jump_table
+ movi a0, 1f
+ addx8 a3, a3, a5
+ l32i a2, a3, 4 # a2: xtregs offset
+ l32i a3, a3, 0 # a3: jump offset
+ add a2, a2, a4
+ add a4, a3, a5
+ jx a4
+
+ /* Restore all registers and return from exception handler. */
+
+1: l32i a6, a1, PT_AREG6
+ l32i a5, a1, PT_AREG5
+ l32i a4, a1, PT_AREG4
+
+ l32i a0, a1, PT_SAR
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ wsr a0, sar
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+
+ rfe
+
+ENDPROC(fast_coprocessor)
+
+ .data
+
+ENTRY(coprocessor_owner)
+
+ .fill XCHAL_CP_MAX, 4, 0
+
+END(coprocessor_owner)
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
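fast_coprocessor implements lazy coprocessor context switching: CPENABLE bits stay clear until a task actually issues a coprocessor instruction, and the resulting 'coprocessor disabled' exception moves the CP register file between owners. The policy, restated as a C sketch (helper names are illustrative, not kernel API; the kernel sizes the owner array with XCHAL_CP_MAX):

	struct thread_info;
	extern void set_cpenable_bit(int cp);
	extern void clear_thread_cpenable(struct thread_info *ti, int cp);
	extern void save_cp_regs(struct thread_info *ti, int cp);
	extern void load_cp_regs(struct thread_info *ti, int cp);

	static struct thread_info *owner[8];    /* mirrors coprocessor_owner */

	void coprocessor_disabled(int cp, struct thread_info *me)
	{
		set_cpenable_bit(cp);           /* CPU may use this CP again */
		if (owner[cp]) {
			clear_thread_cpenable(owner[cp], cp);
			save_cp_regs(owner[cp], cp); /* .Lsave_cp_regs_jump_table */
		}
		owner[cp] = me;
		load_cp_regs(me, cp);           /* .Lload_cp_regs_jump_table */
		/* rfe re-executes the faulting coprocessor instruction */
	}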
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 000000000..9cbc380e9
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,2047 @@
+/*
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>
+#include <variant/tie-asm.h>
+
+/* Unimplemented features. */
+
+#undef KERNEL_STACK_OVERFLOW_CHECK
+
+/* Not well tested.
+ *
+ * - fast_coprocessor
+ */
+
+/*
+ * Macro to find first bit set in WINDOWBASE from the left + 1
+ *
+ * 100....0 -> 1
+ * 010....0 -> 2
+ * 000....1 -> WSBITS
+ */
+
+ .macro ffs_ws bit mask
+
+#if XCHAL_HAVE_NSA
+ nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
+	addi	\bit, \bit, WSBITS - 32 + 1	# topmost bit set -> return 1
+#else
+ movi \bit, WSBITS
+#if WSBITS > 16
+ _bltui \mask, 0x10000, 99f
+ addi \bit, \bit, -16
+ extui \mask, \mask, 16, 16
+#endif
+#if WSBITS > 8
+99: _bltui \mask, 0x100, 99f
+ addi \bit, \bit, -8
+ srli \mask, \mask, 8
+#endif
+99: _bltui \mask, 0x10, 99f
+ addi \bit, \bit, -4
+ srli \mask, \mask, 4
+99: _bltui \mask, 0x4, 99f
+ addi \bit, \bit, -2
+ srli \mask, \mask, 2
+99: _bltui \mask, 0x2, 99f
+ addi \bit, \bit, -1
+99:
+
+#endif
+ .endm
+
+
+ .macro irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ rsr \flags, ps
+ extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei \tmp, LOCKLEVEL, 99f
+ rsil \tmp, LOCKLEVEL
+99:
+#else
+ movi \tmp, LOCKLEVEL
+ rsr \flags, ps
+ or \flags, \flags, \tmp
+ xsr \flags, ps
+ rsync
+#endif
+#else
+ rsil \flags, LOCKLEVEL
+#endif
+ .endm
+
+/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
+
+/*
+ * First-level exception handler for user exceptions.
+ * Save some special registers, extra states and all registers in the AR
+ * register file that were in use in the user task, and jump to the common
+ * exception code.
+ * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
+ * save them for kernel exceptions).
+ *
+ * Entry condition for user_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _user_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _user_exception might be at an odd address. Don't use call0..call12
+ */
+ .literal_position
+
+ENTRY(user_exception)
+
+ /* Save a1, a2, a3, and set SP. */
+
+ rsr a0, depc
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _user_exception
+_user_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ wsr a2, depc # terminate user stack trace with 0
+ rsr a3, sar
+ xsr a2, icountlevel
+ s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
+
+#if XCHAL_HAVE_THREADPTR
+ rur a2, threadptr
+ s32i a2, a1, PT_THREADPTR
+#endif
+
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+ rsr a2, windowbase
+ rsr a3, windowstart
+ ssr a2
+ s32i a2, a1, PT_WINDOWBASE
+ s32i a3, a1, PT_WINDOWSTART
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for restoring registers
+
+ /* Save only live registers. */
+
+ _bbsi.l a2, 1, 1f
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+ _bnei a2, 1, 1f # only one valid frame?
+
+ /* Only one valid frame, skip saving regs. */
+
+ j 2f
+
+ /* Save the remaining registers.
+ * We have to save all registers up to the first '1' from
+ * the right, except the current frame (bit 0).
+ * Assume a2 is: 001001000110001
+ * All register frames starting from the top field to the marked '1'
+ * must be saved.
+ */
+
+1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
+ neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
+ and a3, a3, a2 # max. only one bit is set
+
+ /* Find number of frames to save */
+
+ ffs_ws a0, a3 # number of frames to the '1' from left
+
+ /* Store information into WMASK:
+ * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
+ * bits 4...: number of valid 4-register frames
+ */
+
+ slli a3, a0, 4 # number of frames to save in bits 8..4
+ extui a2, a2, 0, 4 # mask for the first 16 registers
+ or a2, a3, a2
+ s32i a2, a1, PT_WMASK # needed when we restore the reg-file
+
+ /* Save 4 registers at a time */
+
+1: rotw -1
+ s32i a0, a5, PT_AREG_END - 16
+ s32i a1, a5, PT_AREG_END - 12
+ s32i a2, a5, PT_AREG_END - 8
+ s32i a3, a5, PT_AREG_END - 4
+ addi a0, a4, -1
+ addi a1, a5, -16
+ _bnez a0, 1b
+
+ /* WINDOWBASE still in SAR! */
+
+ rsr a2, sar # original WINDOWBASE
+ movi a3, 1
+ ssl a2
+ sll a3, a3
+ wsr a3, windowstart # set corresponding WINDOWSTART bit
+	wsr	a2, windowbase		# and WINDOWBASE
+ rsync
+
+ /* We are back to the original stack pointer (a1) */
+
+2: /* Now, jump to the common exception handler. */
+
+ j common_exception
+
+ENDPROC(user_exception)
+
+/*
+ * First-level exit handler for kernel exceptions
+ * Save special registers and the live window frame.
+ * Note: Even though we change the stack pointer, we don't have to do a
+ * MOVSP here, as we do that when we return from the exception.
+ * (See comment in the kernel exception exit code)
+ *
+ * Entry condition for kernel_exception:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Entry condition for _kernel_exception:
+ *
+ * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
+ * excsave has been restored, and
+ * stack pointer (a1) has been set.
+ *
+ * Note: _kernel_exception might be at an odd address. Don't use call0..call12
+ */
+
+ENTRY(kernel_exception)
+
+ /* Save a1, a2, a3, and set SP. */
+
+ rsr a0, depc # get a2
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG2
+ s32i a3, a2, PT_AREG3
+ mov a1, a2
+
+ .globl _kernel_exception
+_kernel_exception:
+
+ /* Save SAR and turn off single stepping */
+
+ movi a2, 0
+ rsr a3, sar
+ xsr a2, icountlevel
+ s32i a3, a1, PT_SAR
+ s32i a2, a1, PT_ICOUNTLEVEL
+
+ /* Rotate ws so that the current windowbase is at bit0. */
+ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
+
+ rsr a2, windowbase # don't need to save these, we only
+ rsr a3, windowstart # need shifted windowstart: windowmask
+ ssr a2
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2
+ srli a2, a2, 32-WSBITS
+ s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
+
+ /* Save only the live window-frame */
+
+ _bbsi.l a2, 1, 1f
+ s32i a4, a1, PT_AREG4
+ s32i a5, a1, PT_AREG5
+ s32i a6, a1, PT_AREG6
+ s32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ s32i a8, a1, PT_AREG8
+ s32i a9, a1, PT_AREG9
+ s32i a10, a1, PT_AREG10
+ s32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ s32i a12, a1, PT_AREG12
+ s32i a13, a1, PT_AREG13
+ s32i a14, a1, PT_AREG14
+ s32i a15, a1, PT_AREG15
+
+ _bnei a2, 1, 1f
+
+ /* Copy spill slots of a0 and a1 to imitate movsp
+ * in order to keep exception stack continuous
+ */
+ l32i a3, a1, PT_SIZE
+ l32i a0, a1, PT_SIZE + 4
+ s32e a3, a1, -16
+ s32e a0, a1, -12
+1:
+ l32i a0, a1, PT_AREG0 # restore saved a0
+ wsr a0, depc
+
+#ifdef KERNEL_STACK_OVERFLOW_CHECK
+
+ /* Stack overflow check, for debugging */
+ extui a2, a1, TASK_SIZE_BITS,XX
+ movi a3, SIZE??
+ _bge a2, a3, out_of_stack_panic
+
+#endif
+
+/*
+ * This is the common exception handler.
+ * We get here from the user exception handler or simply by falling through
+ * from the kernel exception handler.
+ * Save the remaining special registers, switch to kernel mode, and jump
+ * to the second-level exception handler.
+ *
+ */
+
+common_exception:
+
+ /* Save some registers, disable loops and clear the syscall flag. */
+
+ rsr a2, debugcause
+ rsr a3, epc1
+ s32i a2, a1, PT_DEBUGCAUSE
+ s32i a3, a1, PT_PC
+
+ movi a2, -1
+ rsr a3, excvaddr
+ s32i a2, a1, PT_SYSCALL
+ movi a2, 0
+ s32i a3, a1, PT_EXCVADDR
+#if XCHAL_HAVE_LOOPS
+ xsr a2, lcount
+ s32i a2, a1, PT_LCOUNT
+#endif
+
+	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
+
+ rsr a2, exccause
+ movi a3, 0
+ rsr a0, excsave1
+ s32i a2, a1, PT_EXCCAUSE
+ s32i a3, a0, EXC_TABLE_FIXUP
+
+ /* All unrecoverable states are saved on stack, now, and a1 is valid.
+ * Now we can allow exceptions again. In case we've got an interrupt
+	 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts,
+ * otherwise it's left unchanged.
+ *
+ * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+ */
+
+ rsr a3, ps
+ s32i a3, a1, PT_PS # save ps
+
+#if XTENSA_FAKE_NMI
+ /* Correct PS needs to be saved in the PT_PS:
+ * - in case of exception or level-1 interrupt it's in the PS,
+ * and is already saved.
+ * - in case of medium level interrupt it's in the excsave2.
+ */
+ movi a0, EXCCAUSE_MAPPED_NMI
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beq a2, a0, .Lmedium_level_irq
+ bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+ beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+ rsr a0, excsave2
+ s32i a0, a1, PT_PS # save medium-level interrupt ps
+ bgei a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+ movi a3, LOCKLEVEL
+
+.Lexception:
+ movi a0, 1 << PS_WOE_BIT
+ or a3, a3, a0
+#else
+ addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a0, LOCKLEVEL
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ # a3 = PS.INTLEVEL
+ moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
+ movi a2, 1 << PS_WOE_BIT
+ or a3, a3, a2
+ rsr a2, exccause
+#endif
+
+ /* restore return address (or 0 if return to userspace) */
+ rsr a0, depc
+ wsr a3, ps
+ rsync # PS.WOE => rsync => overflow
+
+ /* Save lbeg, lend */
+#if XCHAL_HAVE_LOOPS
+ rsr a4, lbeg
+ rsr a3, lend
+ s32i a4, a1, PT_LBEG
+ s32i a3, a1, PT_LEND
+#endif
+
+ /* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ rsr a3, scompare1
+ s32i a3, a1, PT_SCOMPARE1
+#endif
+
+ /* Save optional registers. */
+
+ save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+
+ /* Go to second-level dispatcher. Set up parameters to pass to the
+ * exception handler and call the exception handler.
+ */
+
+ rsr a4, excsave1
+ mov a6, a1 # pass stack frame
+ mov a7, a2 # pass EXCCAUSE
+ addx4 a4, a2, a4
+ l32i a4, a4, EXC_TABLE_DEFAULT # load handler
+
+ /* Call the second-level handler */
+
+ callx4 a4
+
+ /* Jump here for exception exit */
+ .global common_exception_return
+common_exception_return:
+
+#if XTENSA_FAKE_NMI
+ l32i a2, a1, PT_EXCCAUSE
+ movi a3, EXCCAUSE_MAPPED_NMI
+ beq a2, a3, .LNMIexit
+#endif
+1:
+ irq_save a2, a3
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call4 trace_hardirqs_off
+#endif
+
+ /* Jump if we are returning from kernel exceptions. */
+
+ l32i a3, a1, PT_PS
+ GET_THREAD_INFO(a2, a1)
+ l32i a4, a2, TI_FLAGS
+ _bbci.l a3, PS_UM_BIT, 6f
+
+ /* Specific to a user exception exit:
+ * We need to check some flags for signal handling and rescheduling,
+ * and have to restore WB and WS, extra states, and all registers
+ * in the register file that were in use in the user task.
+ * Note that we don't disable interrupts here.
+ */
+
+ _bbsi.l a4, TIF_NEED_RESCHED, 3f
+ _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
+ _bbci.l a4, TIF_SIGPENDING, 5f
+
+2: l32i a4, a1, PT_DEPC
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+
+ /* Call do_signal() */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call4 trace_hardirqs_on
+#endif
+ rsil a2, 0
+ mov a6, a1
+ call4 do_notify_resume # int do_notify_resume(struct pt_regs*)
+ j 1b
+
+3: /* Reschedule */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call4 trace_hardirqs_on
+#endif
+ rsil a2, 0
+ call4 schedule # void schedule (void)
+ j 1b
+
+#ifdef CONFIG_PREEMPT
+6:
+ _bbci.l a4, TIF_NEED_RESCHED, 4f
+
+ /* Check current_thread_info->preempt_count */
+
+ l32i a4, a2, TI_PRE_COUNT
+ bnez a4, 4f
+ call4 preempt_schedule_irq
+ j 1b
+#endif
+
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+ l32i a3, a1, PT_PS
+ _bbci.l a3, PS_UM_BIT, 4f
+#endif
+
+5:
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ _bbci.l a4, TIF_DB_DISABLED, 7f
+ call4 restore_dbreak
+7:
+#endif
+#ifdef CONFIG_DEBUG_TLB_SANITY
+ l32i a4, a1, PT_DEPC
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+ call4 check_tlb_sanity
+#endif
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei a4, LOCKLEVEL, 1f
+ call4 trace_hardirqs_on
+1:
+#endif
+ /* Restore optional registers. */
+
+ load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
+ /* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ l32i a2, a1, PT_SCOMPARE1
+ wsr a2, scompare1
+#endif
+ wsr a3, ps /* disable interrupts */
+
+ _bbci.l a3, PS_UM_BIT, kernel_exception_exit
+
+user_exception_exit:
+
+ /* Restore the state of the task and return from the exception. */
+
+ /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
+
+ l32i a2, a1, PT_WINDOWBASE
+ l32i a3, a1, PT_WINDOWSTART
+ wsr a1, depc # use DEPC as temp storage
+ wsr a3, windowstart # restore WINDOWSTART
+ ssr a2 # preserve user's WB in the SAR
+ wsr a2, windowbase # switch to user's saved WB
+ rsync
+ rsr a1, depc # restore stack pointer
+ l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
+ rotw -1 # we restore a4..a7
+ _bltui a6, 16, 1f # only have to restore current window?
+
+ /* The working registers are a0 and a3. We are restoring to
+ * a4..a7. Be careful not to destroy what we have just restored.
+ * Note: wmask has the format YYYYM:
+ * Y: number of registers saved in groups of 4
+ * M: 4 bit mask of first 16 registers
+ */
+
+ mov a2, a6
+ mov a3, a5
+
+2: rotw -1 # a0..a3 become a4..a7
+ addi a3, a7, -4*4 # next iteration
+ addi a2, a6, -16 # decrementing Y in WMASK
+ l32i a4, a3, PT_AREG_END + 0
+ l32i a5, a3, PT_AREG_END + 4
+ l32i a6, a3, PT_AREG_END + 8
+ l32i a7, a3, PT_AREG_END + 12
+ _bgeui a2, 16, 2b
+
+	/* Clear unrestored registers (don't leak anything to user-land) */
+
+1: rsr a0, windowbase
+ rsr a3, sar
+ sub a3, a0, a3
+ beqz a3, 2f
+ extui a3, a3, 0, WBBITS
+
+1: rotw -1
+ addi a3, a7, -1
+ movi a4, 0
+ movi a5, 0
+ movi a6, 0
+ movi a7, 0
+ bgei a3, 1, 1b
+
+	/* We are back where we were when we started.
+ * Note: a2 still contains WMASK (if we've returned to the original
+ * frame where we had loaded a2), or at least the lower 4 bits
+ * (if we have restored WSBITS-1 frames).
+ */
+
+2:
+#if XCHAL_HAVE_THREADPTR
+ l32i a3, a1, PT_THREADPTR
+ wur a3, threadptr
+#endif
+
+ j common_exception_exit
+
+ /* This is the kernel exception exit.
+ * We avoided to do a MOVSP when we entered the exception, but we
+ * have to do it here.
+ */
+
+kernel_exception_exit:
+
+ /* Check if we have to do a movsp.
+ *
+ * We only have to do a movsp if the previous window-frame has
+ * been spilled to the *temporary* exception stack instead of the
+ * task's stack. This is the case if the corresponding bit in
+ * WINDOWSTART for the previous window-frame was set before
+ * (not spilled) but is zero now (spilled).
+ * If this bit is zero, all other bits except the one for the
+ * current window frame are also zero. So, we can use a simple test:
+ * 'and' WINDOWSTART and WINDOWSTART-1:
+ *
+ * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
+ *
+ * The result is zero only if one bit was set.
+ *
+ * (Note: We might have gone through several task switches before
+ * we come back to the current task, so WINDOWBASE might be
+ * different from the time the exception occurred.)
+ */
+
+ /* Test WINDOWSTART before and after the exception.
+ * We actually have WMASK, so we only have to test if it is 1 or not.
+ */
+
+ l32i a2, a1, PT_WMASK
+	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump
+
+ /* Test WINDOWSTART now. If spilled, do the movsp */
+
+ rsr a3, windowstart
+ addi a0, a3, -1
+ and a3, a3, a0
+ _bnez a3, common_exception_exit
+
+ /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
+
+ addi a0, a1, -16
+ l32i a3, a0, 0
+ l32i a4, a0, 4
+ s32i a3, a1, PT_SIZE+0
+ s32i a4, a1, PT_SIZE+4
+ l32i a3, a0, 8
+ l32i a4, a0, 12
+ s32i a3, a1, PT_SIZE+8
+ s32i a4, a1, PT_SIZE+12
+
+ /* Common exception exit.
+ * We restore the special register and the current window frame, and
+ * return from the exception.
+ *
+ * Note: We expect a2 to hold PT_WMASK
+ */
+
+common_exception_exit:
+
+ /* Restore address registers. */
+
+ _bbsi.l a2, 1, 1f
+ l32i a4, a1, PT_AREG4
+ l32i a5, a1, PT_AREG5
+ l32i a6, a1, PT_AREG6
+ l32i a7, a1, PT_AREG7
+ _bbsi.l a2, 2, 1f
+ l32i a8, a1, PT_AREG8
+ l32i a9, a1, PT_AREG9
+ l32i a10, a1, PT_AREG10
+ l32i a11, a1, PT_AREG11
+ _bbsi.l a2, 3, 1f
+ l32i a12, a1, PT_AREG12
+ l32i a13, a1, PT_AREG13
+ l32i a14, a1, PT_AREG14
+ l32i a15, a1, PT_AREG15
+
+ /* Restore PC, SAR */
+
+1: l32i a2, a1, PT_PC
+ l32i a3, a1, PT_SAR
+ wsr a2, epc1
+ wsr a3, sar
+
+ /* Restore LBEG, LEND, LCOUNT */
+#if XCHAL_HAVE_LOOPS
+ l32i a2, a1, PT_LBEG
+ l32i a3, a1, PT_LEND
+ wsr a2, lbeg
+ l32i a2, a1, PT_LCOUNT
+ wsr a3, lend
+ wsr a2, lcount
+#endif
+
+ /* We control single stepping through the ICOUNTLEVEL register. */
+
+ l32i a2, a1, PT_ICOUNTLEVEL
+ movi a3, -2
+ wsr a2, icountlevel
+ wsr a3, icount
+
+ /* Check if it was double exception. */
+
+ l32i a0, a1, PT_DEPC
+ l32i a3, a1, PT_AREG3
+ l32i a2, a1, PT_AREG2
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore a0...a3 and return */
+
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfe
+
+1: wsr a0, depc
+ l32i a0, a1, PT_AREG0
+ l32i a1, a1, PT_AREG1
+ rfde
+
+ENDPROC(kernel_exception)
+
+/*
+ * Debug exception handler.
+ *
+ * Currently, we don't support KGDB, so only user applications can be debugged.
+ *
+ * When we get here, a0 is trashed and saved to excsave[debuglevel]
+ */
+
+ .literal_position
+
+ENTRY(debug_exception)
+
+ rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
+ bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
+
+ /* Set EPC1 and EXCCAUSE */
+
+ wsr a2, depc # save a2 temporarily
+ rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
+ wsr a2, epc1
+
+ movi a2, EXCCAUSE_MAPPED_DEBUG
+ wsr a2, exccause
+
+ /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
+
+ movi a2, 1 << PS_EXCM_BIT
+ or a2, a0, a2
+ wsr a2, ps
+
+ /* Switch to kernel/user stack, restore jump vector, and save a0 */
+
+ bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
+
+ addi a2, a1, -16-PT_SIZE # assume kernel stack
+3:
+ l32i a0, a3, DT_DEBUG_SAVE
+ s32i a1, a2, PT_AREG1
+ s32i a0, a2, PT_AREG0
+ movi a0, 0
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ xsr a0, depc
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_AREG2
+ mov a1, a2
+
+ /* Debug exception is handled as an exception, so interrupts will
+ * likely be enabled in the common exception handler. Disable
+ * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
+ * meaning.
+ */
+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
+ GET_THREAD_INFO(a2, a1)
+ l32i a3, a2, TI_PRE_COUNT
+ addi a3, a3, 1
+ s32i a3, a2, TI_PRE_COUNT
+#endif
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, _user_exception
+ j _kernel_exception
+
+2: rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
+ j 3b
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /* Debug exception while in exception mode. This may happen when
+ * window overflow/underflow handler or fast exception handler hits
+ * data breakpoint, in which case save and disable all data
+ * breakpoints, single-step faulting instruction and restore data
+ * breakpoints.
+ */
+1:
+ bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode
+
+ rsr a0, debugcause
+ bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
+
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ l32i a0, a3, DT_DBREAKC_SAVE + _index * 4
+ wsr a0, SREG_DBREAKC + _index
+ .set _index, _index + 1
+ .endr
+
+ l32i a0, a3, DT_ICOUNT_LEVEL_SAVE
+ wsr a0, icountlevel
+
+ l32i a0, a3, DT_ICOUNT_SAVE
+ xsr a0, icount
+
+ l32i a0, a3, DT_DEBUG_SAVE
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ rfi XCHAL_DEBUGLEVEL
+
+.Ldebug_save_dbreak:
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ movi a0, 0
+ xsr a0, SREG_DBREAKC + _index
+ s32i a0, a3, DT_DBREAKC_SAVE + _index * 4
+ .set _index, _index + 1
+ .endr
+
+ movi a0, XCHAL_EXCM_LEVEL + 1
+ xsr a0, icountlevel
+ s32i a0, a3, DT_ICOUNT_LEVEL_SAVE
+
+ movi a0, 0xfffffffe
+ xsr a0, icount
+ s32i a0, a3, DT_ICOUNT_SAVE
+
+ l32i a0, a3, DT_DEBUG_SAVE
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ rfi XCHAL_DEBUGLEVEL
+#else
+ /* Debug exception while in exception mode. Should not happen. */
+1: j 1b // FIXME!!
+#endif
+
+ENDPROC(debug_exception)
+
+/*
+ * We get here in case of an unrecoverable exception.
+ * The only thing we can do is to be nice and print a panic message.
+ * We only produce a single stack frame for panic, so ???
+ *
+ *
+ * Entry conditions:
+ *
+ * - a0 contains the caller address; original value saved in excsave1.
+ * - the original a0 contains a valid return address (backtrace) or 0.
+ * - a2 contains a valid stackpointer
+ *
+ * Notes:
+ *
+ * - If the stack pointer could be invalid, the caller has to setup a
+ * dummy stack pointer (e.g. the stack of the init_task)
+ *
+ * - If the return address could be invalid, the caller has to set it
+ * to 0, so the backtrace would stop.
+ *
+ */
+ .align 4
+unrecoverable_text:
+ .ascii "Unrecoverable error in exception handler\0"
+
+ .literal_position
+
+ENTRY(unrecoverable_exception)
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, windowstart
+ wsr a1, windowbase
+ rsync
+
+ movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ movi a1, init_task
+ movi a0, 0
+ addi a1, a1, PT_REGS_OFFSET
+
+ movi a6, unrecoverable_text
+ call4 panic
+
+1: j 1b
+
+ENDPROC(unrecoverable_exception)
+
+/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+/*
+ * Fast-handler for alloca exceptions
+ *
+ * The ALLOCA handler is entered when user code executes the MOVSP
+ * instruction and the caller's frame is not in the register file.
+ *
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow
+ * exception of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed and this time, since the next window frame is in the
+ * active AR registers, it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss, the page will get mapped and
+ * the partial WindowUnderflow will be handled in the double exception
+ * handler.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_alloca)
+ rsr a0, windowbase
+ rotw -1
+ rsr a2, ps
+ extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+ xor a3, a3, a4
+ l32i a4, a6, PT_AREG0
+ l32i a1, a6, PT_DEPC
+ rsr a6, depc
+ wsr a1, depc
+ slli a3, a3, PS_OWB_SHIFT
+ xor a2, a2, a3
+ wsr a2, ps
+ rsync
+
+ _bbci.l a4, 31, 4f
+ rotw -1
+ _bbci.l a8, 30, 8f
+ rotw -1
+ j _WindowUnderflow12
+8: j _WindowUnderflow8
+4: j _WindowUnderflow4
+ENDPROC(fast_alloca)
+
+/*
+ * fast system calls.
+ *
+ * WARNING: The kernel doesn't save the entire user context before
+ * handling a fast system call. These functions are small and short,
+ * usually offering some functionality not available to user tasks.
+ *
+ * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ */
+
+ENTRY(fast_syscall_kernel)
+
+ /* Skip syscall. */
+
+ rsr a0, epc1
+ addi a0, a0, 3
+ wsr a0, epc1
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+ rsr a0, depc # get syscall-nr
+ _beqz a0, fast_syscall_spill_registers
+ _beqi a0, __NR_xtensa, fast_syscall_xtensa
+
+ j kernel_exception
+
+ENDPROC(fast_syscall_kernel)
+
+ENTRY(fast_syscall_user)
+
+ /* Skip syscall. */
+
+ rsr a0, epc1
+ addi a0, a0, 3
+ wsr a0, epc1
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
+
+ rsr a0, depc # get syscall-nr
+ _beqz a0, fast_syscall_spill_registers
+ _beqi a0, __NR_xtensa, fast_syscall_xtensa
+
+ j user_exception
+
+ENDPROC(fast_syscall_user)
+
+ENTRY(fast_syscall_unrecoverable)
+
+ /* Restore all states. */
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, depc # restore a2, depc
+
+ wsr a0, excsave1
+ call0 unrecoverable_exception
+
+ENDPROC(fast_syscall_unrecoverable)
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ *
+ * Entry condition:
+ *
+ * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in a0 and DEPC
+ * a3: a3
+ * a4..a15: unchanged
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: we don't have to save a2; a2 holds the return value
+ */
+
+ .literal_position
+
+#ifdef CONFIG_FAST_SYSCALL_XTENSA
+
+ENTRY(fast_syscall_xtensa)
+
+ s32i a7, a2, PT_AREG7 # we need an additional register
+ movi a7, 4 # sizeof(unsigned int)
+ access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
+
+ _bgeui a6, SYS_XTENSA_COUNT, .Lill
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
+
+ /* Fall through for ATOMIC_CMP_SWP. */
+
+.Lswp: /* Atomic compare and swap */
+
+EX(.Leac) l32i a0, a3, 0 # read old value
+ bne a0, a4, 1f # same as old value? jump
+EX(.Leac) s32i a5, a3, 0 # different, modify value
+ l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 1 # and return 1
+ rfe
+
+1: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, 0 # return 0 (note that we cannot set
+ rfe
+
+.Lnswp: /* Atomic set, add, and exg_add. */
+
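+ /* One sequence serves all three ops: a0 = old + arg, then moveqz
+ * replaces the sum with arg alone when the opcode is ATOMIC_SET (a6 is
+ * zero after the subtraction below). The old value, kept in a7, is the
+ * return value in every case.
+ */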
+EX(.Leac) l32i a7, a3, 0 # orig
+ addi a6, a6, -SYS_XTENSA_ATOMIC_SET
+ add a0, a4, a7 # + arg
+ moveqz a0, a4, a6 # set
+ addi a6, a6, SYS_XTENSA_ATOMIC_SET
+EX(.Leac) s32i a0, a3, 0 # write new value
+
+ mov a0, a2
+ mov a2, a7
+ l32i a7, a0, PT_AREG7 # restore a7
+ l32i a0, a0, PT_AREG0 # restore a0
+ rfe
+
+.Leac: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EFAULT
+ rfe
+
+.Lill: l32i a7, a2, PT_AREG7 # restore a7
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -EINVAL
+ rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#else /* CONFIG_FAST_SYSCALL_XTENSA */
+
+ENTRY(fast_syscall_xtensa)
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -ENOSYS
+ rfe
+
+ENDPROC(fast_syscall_xtensa)
+
+#endif /* CONFIG_FAST_SYSCALL_XTENSA */
+
+
+/* fast_syscall_spill_registers.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
+ */
+
+#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
+
+ENTRY(fast_syscall_spill_registers)
+
+ /* Register a FIXUP handler (pass current wb as a parameter) */
+
+ xsr a3, excsave1
+ movi a0, fast_syscall_spill_registers_fixup
+ s32i a0, a3, EXC_TABLE_FIXUP
+ rsr a0, windowbase
+ s32i a0, a3, EXC_TABLE_PARAM
+ xsr a3, excsave1 # restore a3 and excsave_1
+
+ /* Save a3, a4 and SAR on stack. */
+
+ rsr a0, sar
+ s32i a3, a2, PT_AREG3
+ s32i a0, a2, PT_SAR
+
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+
+ s32i a4, a2, PT_AREG4
+ s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
+ s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
+ s32i a15, a2, PT_AREG15
+
+ /*
+ * Rotate ws so that the current windowbase is at bit 0.
+ * Assume ws = xxxwww1yy (www1 current window frame).
+ * Rotate ws right so that a4 = yyxxxwww1.
+ */
+
+ rsr a0, windowbase
+ rsr a3, windowstart # a3 = xxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
+ srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
+
+ /* We are done if there is no frame other than the current one. */
+
+ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
+ movi a0, (1 << (WSBITS-1))
+ _beqz a3, .Lnospill # only one active frame? jump
+
+ /* We want 1 at the top, so that we return to the current windowbase */
+
+ or a3, a3, a0 # 1yyxxxwww
+
+ /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
+
+ wsr a3, windowstart # save shifted windowstart
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
+
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
+ movi a3, WSBITS
+ sub a0, a3, a0 # WSBITS - a0: number of 0-bits from right
+ ssr a0 # save in SAR for later.
+
+ rsr a3, windowbase
+ add a3, a3, a0
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, windowstart
+ srl a3, a3 # shift windowstart
+
+ /* WB is now just one frame below the oldest frame in the register
+ window. WS is shifted so the oldest frame is in bit 0, thus, WB
+ and WS differ by one 4-register frame. */
+
+ /* Save frames. Depending on which call was used (call4, call8, or
+ * call12), we have to save 4, 8, or 12 registers.
+ */
+
+
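+ /* The distance from bit 0 to the next set bit of the rotated
+ * windowstart mask in a3 encodes the call size: bit 1 -> call4,
+ * bit 2 -> call8, bit 3 -> call12.
+ */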
+.Lloop: _bbsi.l a3, 1, .Lc4
+ _bbci.l a3, 2, .Lc12
+
+.Lc8: s32e a4, a13, -16
+ l32e a4, a5, -12
+ s32e a8, a4, -32
+ s32e a5, a13, -12
+ s32e a6, a13, -8
+ s32e a7, a13, -4
+ s32e a9, a4, -28
+ s32e a10, a4, -24
+ s32e a11, a4, -20
+ srli a11, a3, 2 # shift windowstart by 2
+ rotw 2
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc4: s32e a4, a9, -16
+ s32e a5, a9, -12
+ s32e a6, a9, -8
+ s32e a7, a9, -4
+
+ srli a7, a3, 1
+ rotw 1
+ _bnei a3, 1, .Lloop
+ j .Lexit
+
+.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero!
+
+ /* 12-register frame (call12) */
+
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
+
+ s32e a9, a8, -44
+ s32e a10, a8, -40
+ s32e a11, a8, -36
+ s32e a12, a8, -32
+ s32e a13, a8, -28
+ s32e a14, a8, -24
+ s32e a15, a8, -20
+ srli a15, a3, 3
+
+ /* The stack pointer for a4..a7 is out of reach, so we rotate the
+ * window, grab the stackpointer, and rotate back.
+ * Alternatively, we could also use the following approach, but that
+ * makes the fixup routine much more complicated:
+ * rotw 1
+ * s32e a0, a13, -16
+ * ...
+ * rotw 2
+ */
+
+ rotw 1
+ mov a4, a13
+ rotw -1
+
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
+
+ rotw 3
+
+ _beqi a3, 1, .Lexit
+ j .Lloop
+
+.Lexit:
+
+ /* Done. Do the final rotation and set WS */
+
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
+
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_SAR
+ l32i a0, a2, PT_AREG0
+ wsr a3, sar
+ l32i a3, a2, PT_AREG3
+
+ /* Restore clobbered registers. */
+
+ l32i a4, a2, PT_AREG4
+ l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
+ l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
+ l32i a15, a2, PT_AREG15
+
+ movi a2, 0
+ rfe
+
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
+ * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
+ */
+
+ movi a0, 1
+ movi a1, 0
+
+ wsr a0, windowstart
+ wsr a1, windowbase
+ rsync
+
+ movi a0, 0
+
+ rsr a3, excsave1
+ l32i a1, a3, EXC_TABLE_KSTK
+
+ movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a4, ps
+ rsync
+
+ movi a6, SIGSEGV
+ call4 do_exit
+
+ /* shouldn't return, so panic */
+
+ wsr a0, excsave1
+ call0 unrecoverable_exception # should not return
+1: j 1b
+
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+ rsr a2, windowbase # get current windowbase (a2 is saved)
+ xsr a0, depc # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ *
+ * Note: we use a3 to set the windowbase, so we take special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+ wsr a2, windowstart # set corrected windowstart
+
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+ * depc: depc (we have to return to that address)
+ * excsave_1: exctable
+ */
+
+ wsr a3, windowbase
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available
+ * depc: exception address
+ * excsave: exctable
+ * Note: This frame might be the same as above.
+ */
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ rsr a3, excsave1
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ jx a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, depc # exception address
+
+ /* Restore fixup handler. */
+
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, sar # WB is still in SAR
+ neg a3, a3
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+ rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
+#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+ENTRY(fast_syscall_spill_registers)
+
+ l32i a0, a2, PT_AREG0 # restore a0
+ movi a2, -ENOSYS
+ rfe
+
+ENDPROC(fast_syscall_spill_registers)
+
+#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
+
+#ifdef CONFIG_MMU
+/*
+ * We should never get here. Bail out!
+ */
+
+ENTRY(fast_second_level_miss_double_kernel)
+
+1:
+ call0 unrecoverable_exception # should not return
+1: j 1b
+
+ENDPROC(fast_second_level_miss_double_kernel)
+
+/* First-level entry handler for user, kernel, and double 2nd-level
+ * TLB miss exceptions. Note that for now, user and kernel miss
+ * exceptions share the same entry point and are handled identically.
+ *
+ * An old, less-efficient C version of this function used to exist.
+ * We include it below, interleaved as comments, for reference.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_second_level_miss)
+
+ /* Save a1 and a3. Note: we don't expect a double exception. */
+
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
+
+ /* We need to map the page of PTEs for the user task. Find
+ * the pointer to that page. Also, it's possible for tsk->mm
+ * to be NULL while tsk->active_mm is nonzero if we faulted on
+ * a vmalloc address. In that rare case, we must use
+ * active_mm instead to avoid a fault in this handler. See
+ *
+ * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
+ * (or search the Internet for "mm vs. active_mm")
+ *
+ * if (!mm)
+ * mm = tsk->active_mm;
+ * pgd = pgd_offset (mm, regs->excvaddr);
+ * pmd = pmd_offset (pgd, regs->excvaddr);
+ * pmdval = *pmd;
+ */
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, 9f
+
+8: rsr a3, excvaddr # fault address
+ _PGD_OFFSET(a0, a3, a1)
+ l32i a0, a0, 0 # read pmdval
+ beqz a0, 2f
+
+ /* Read ptevaddr and convert to top of page-table page.
+ *
+ * vpnval = read_ptevaddr_register() & PAGE_MASK;
+ * vpnval += DTLB_WAY_PGTABLE;
+ * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
+ * write_dtlb_entry (pteval, vpnval);
+ *
+ * The messy computation for 'pteval' above really simplifies
+ * into the following:
+ *
+ * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
+ * | PAGE_DIRECTORY
+ */
+
+ movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
+ add a0, a0, a1 # pmdval - PAGE_OFFSET
+ extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
+ xor a0, a0, a1
+
+ movi a1, _PAGE_DIRECTORY
+ or a0, a0, a1 # ... | PAGE_DIRECTORY
+
+ /*
+ * We utilize all three wired ways (7-9) to hold pmd translations.
+ * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+ * This allows us to map the three most common regions to three
+ * different DTLB ways:
+ * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
+ * 2 -> way 8 shared libraries (2000.0000)
+ * 3 -> way 9 stack (3000.0000)
+ */
+
+ extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
+ rsr a1, ptevaddr
+ addx2 a3, a3, a3 # -> 0,3,6,9
+ srli a1, a1, PAGE_SHIFT
+ extui a3, a3, 2, 2 # -> 0,0,1,2
+ slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
+ addi a3, a3, DTLB_WAY_PGD
+ add a1, a1, a3 # ... + way_number
+
+3: wdtlb a0, a1
+ dsync
+
+ /* Exit critical section. */
+
+4: rsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a0, a2, PT_AREG0
+ l32i a1, a2, PT_AREG1
+ l32i a3, a2, PT_AREG3
+ l32i a2, a2, PT_DEPC
+
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ /* Restore excsave1 and return. */
+
+ rsr a2, depc
+ rfe
+
+ /* Return from double exception. */
+
+1: xsr a2, depc
+ esync
+ rfde
+
+9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ bnez a0, 8b
+
+ /* Even more unlikely case: active_mm == 0.
+ * We can get here if an NMI arrives in the middle of a context_switch
+ * that touches the vmalloc area.
+ */
+ movi a0, init_mm
+ j 8b
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+2: /* Special case for cache aliasing.
+ * We (should) only get here if a clear_user_page, copy_user_page
+ * or the aliased cache flush functions got preemptively interrupted
+ * by another task. Re-establish temporary mapping to the
+ * TLBTEMP_BASE areas.
+ */
+
+ /* We shouldn't be in a double exception */
+
+ l32i a0, a2, PT_DEPC
+ bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+
+ /* Make sure the exception originated in the special functions */
+
+ movi a0, __tlbtemp_mapping_start
+ rsr a3, epc1
+ bltu a3, a0, 2f
+ movi a0, __tlbtemp_mapping_end
+ bgeu a3, a0, 2f
+
+ /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+ movi a3, TLBTEMP_BASE_1
+ rsr a0, excvaddr
+ bltu a0, a3, 2f
+
+ addi a1, a0, -TLBTEMP_SIZE
+ bgeu a1, a3, 2f
+
+ /* Check if we have to restore an ITLB mapping. */
+
+ movi a1, __tlbtemp_mapping_itlb
+ rsr a3, epc1
+ sub a3, a3, a1
+
+ /* Calculate VPN */
+
+ movi a1, PAGE_MASK
+ and a1, a1, a0
+
+ /* Jump for ITLB entry */
+
+ bgez a3, 1f
+
+ /* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+ extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+ add a1, a3, a1
+
+ /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+ mov a0, a6
+ movnez a0, a7, a3
+ j 3b
+
+ /* ITLB entry. We only use dst in a6. */
+
+1: witlb a6, a1
+ isync
+ j 4b
+
+
+#endif // DCACHE_WAY_SIZE > PAGE_SIZE
+
+
+2: /* Invalid PGD, default exception handling */
+
+ rsr a1, depc
+ s32i a1, a2, PT_AREG2
+ mov a1, a2
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
+ j _kernel_exception
+1: j _user_exception
+
+ENDPROC(fast_second_level_miss)
+
+/*
+ * StoreProhibitedException
+ *
+ * Update the pte and invalidate the itlb mapping for this pte.
+ *
+ * Entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original in DEPC
+ * a3: a3
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: dispatch table
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_store_prohibited)
+
+ /* Save a1 and a3. */
+
+ s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
+
+ GET_CURRENT(a1,a2)
+ l32i a0, a1, TASK_MM # tsk->mm
+ beqz a0, 9f
+
+8: rsr a1, excvaddr # fault address
+ _PGD_OFFSET(a0, a1, a3)
+ l32i a0, a0, 0
+ beqz a0, 2f
+
+ /*
+ * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
+ * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
+ */
+
+ _PTE_OFFSET(a0, a1, a3)
+ l32i a3, a0, 0 # read pteval
+ movi a1, _PAGE_CA_INVALID
+ ball a3, a1, 2f
+ bbci.l a3, _PAGE_WRITABLE_BIT, 2f
+
+ movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
+ or a3, a3, a1
+ rsr a1, excvaddr
+ s32i a3, a0, 0
+
+ /* We need to flush the cache if we have page coloring. */
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ dhwb a0, 0
+#endif
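+ /* Probe the DTLB for the faulting address and overwrite that entry
+ * with the updated (now writable and dirty) PTE.
+ */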
+ pdtlb a0, a1
+ wdtlb a3, a0
+
+ /* Exit critical section. */
+
+ movi a0, 0
+ rsr a3, excsave1
+ s32i a0, a3, EXC_TABLE_FIXUP
+
+ /* Restore the working registers, and return. */
+
+ l32i a3, a2, PT_AREG3
+ l32i a1, a2, PT_AREG1
+ l32i a0, a2, PT_AREG0
+ l32i a2, a2, PT_DEPC
+
+ bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+ rsr a2, depc
+ rfe
+
+ /* Double exception. Restore FIXUP handler and return. */
+
+1: xsr a2, depc
+ esync
+ rfde
+
+9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ j 8b
+
+2: /* If there was a problem, handle fault in C */
+
+ rsr a3, depc # still holds a2
+ s32i a3, a2, PT_AREG2
+ mov a1, a2
+
+ rsr a2, ps
+ bbsi.l a2, PS_UM_BIT, 1f
+ j _kernel_exception
+1: j _user_exception
+
+ENDPROC(fast_store_prohibited)
+
+#endif /* CONFIG_MMU */
+
+/*
+ * System Calls.
+ *
+ * void system_call (struct pt_regs* regs, int exccause)
+ * a2 a3
+ */
+ .literal_position
+
+ENTRY(system_call)
+
+ entry a1, 32
+
+ /* regs->syscall = regs->areg[2] */
+
+ l32i a3, a2, PT_AREG2
+ mov a6, a2
+ s32i a3, a2, PT_SYSCALL
+ call4 do_syscall_trace_enter
+ mov a3, a6
+
+ /* syscall = sys_call_table[syscall_nr] */
+
+ movi a4, sys_call_table
+ movi a5, __NR_syscall_count
+ movi a6, -ENOSYS
+ bgeu a3, a5, 1f
+
+ addx4 a4, a3, a4
+ l32i a4, a4, 0
+ movi a5, sys_ni_syscall
+ beq a4, a5, 1f
+
+ /* Load args: arg0 - arg5 are passed via regs. */
+
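+ /* The xtensa syscall convention passes the syscall number in a2 and
+ * the arguments in a6, a3, a4, a5, a8, a9; hence the pt_regs slots
+ * read below.
+ */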
+ l32i a6, a2, PT_AREG6
+ l32i a7, a2, PT_AREG3
+ l32i a8, a2, PT_AREG4
+ l32i a9, a2, PT_AREG5
+ l32i a10, a2, PT_AREG8
+ l32i a11, a2, PT_AREG9
+
+ /* Pass one additional argument to the syscall: pt_regs (on stack) */
+ s32i a2, a1, 0
+
+ callx4 a4
+
+1: /* regs->areg[2] = return_value */
+
+ s32i a6, a2, PT_AREG2
+ mov a6, a2
+ call4 do_syscall_trace_leave
+ retw
+
+ENDPROC(system_call)
+
+/*
+ * Spill live registers on the kernel stack macro.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
+
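+ /* Sketch of the mechanism: walking the register file forward with
+ * call12/_entry makes the window-overflow handlers spill every live
+ * frame to the stack, while the addi/mov of a12 threads the return
+ * address so the matching retw chain can unwind again. The exact
+ * iteration count depends on XCHAL_NUM_AREGS, handled below.
+ */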
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 16
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
+
+/*
+ * Task switch.
+ *
+ * struct task* _switch_to (struct task* prev, struct task* next)
+ * a2 a2 a3
+ */
+
+ENTRY(_switch_to)
+
+ entry a1, 48
+
+ mov a11, a3 # and 'next' (a3)
+
+ l32i a4, a2, TASK_THREAD_INFO
+ l32i a5, a3, TASK_THREAD_INFO
+
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+#if THREAD_RA > 1020 || THREAD_SP > 1020
+ addi a10, a2, TASK_THREAD
+ s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
+ s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
+#else
+ s32i a0, a2, THREAD_RA # save return address
+ s32i a1, a2, THREAD_SP # save stack pointer
+#endif
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ movi a6, __stack_chk_guard
+ l32i a8, a3, TASK_STACK_CANARY
+ s32i a8, a6, 0
+#endif
+
+ /* Disable ints while we manipulate the stack pointer. */
+
+ irq_save a14, a3
+ rsync
+
+ /* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ l32i a3, a5, THREAD_CPENABLE
+ xsr a3, cpenable
+ s32i a3, a4, THREAD_CPENABLE
+#endif
+
+ /* Flush register file. */
+
+ spill_registers_kernel
+
+ /* Set kernel stack (and leave critical section)
+ * Note: It's safe to set it here. The stack will not be overwritten
+ * because the kernel stack will only be loaded again after
+ * we return from kernel space.
+ */
+
+ rsr a3, excsave1 # exc_table
+ addi a7, a5, PT_REGS_OFFSET
+ s32i a7, a3, EXC_TABLE_KSTK
+
+ /* restore context of the task 'next' */
+
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
+
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
+
+ wsr a14, ps
+ rsync
+
+ retw
+
+ENDPROC(_switch_to)
+
+ENTRY(ret_from_fork)
+
+ /* void schedule_tail (struct task_struct *prev)
+ * Note: prev is still in a6 (return value from fake call4 frame)
+ */
+ call4 schedule_tail
+
+ mov a6, a1
+ call4 do_syscall_trace_leave
+
+ j common_exception_return
+
+ENDPROC(ret_from_fork)
+
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ * left from _switch_to: a6 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+ call4 schedule_tail
+ mov a6, a3
+ callx4 a2
+ j common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 000000000..5bd38ea2d
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,379 @@
+/*
+ * arch/xtensa/kernel/head.S
+ *
+ * Xtensa Processor startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+/*
+ * This module contains the entry code for kernel images. It performs the
+ * minimal setup needed to call the generic C routines.
+ *
+ * Prerequisites:
+ *
+ * - The kernel image has been loaded to the actual address where it was
+ * compiled to.
+ * - a2 contains either 0 or a pointer to a list of boot parameters.
+ * (see setup.c for more details)
+ *
+ */
+
+/*
+ * _start
+ *
+ * The bootloader passes a pointer to a list of boot parameters in a2.
+ */
+
+ /* The first bytes of the kernel image must be an instruction, so we
+ * manually allocate and define the literal constant we need for a jx
+ * instruction.
+ */
+
+ __HEAD
+ .begin no-absolute-literals
+
+ENTRY(_start)
+
+ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupOCD
+
+ .align 4
+ .literal_position
+.Lstartup:
+ .word _startup
+
+ .align 4
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ rsr a2, excsave1
+ movi a3, XCHAL_KSEG_PADDR
+ bltu a2, a3, 1f
+ sub a2, a2, a3
+ movi a3, XCHAL_KSEG_SIZE
+ bgeu a2, a3, 1f
+ movi a3, XCHAL_KSEG_CACHED_VADDR
+ add a2, a2, a3
+ wsr a2, excsave1
+1:
+#endif
+#endif
+ .end no-absolute-literals
+
+ l32r a0, .Lstartup
+ jx a0
+
+ENDPROC(_start)
+
+ __REF
+ .literal_position
+
+ENTRY(_startup)
+
+ /* Set a0 to 0 for the remaining initialization. */
+
+ movi a0, 0
+
+#if XCHAL_HAVE_VECBASE
+ movi a2, VECBASE_VADDR
+ wsr a2, vecbase
+#endif
+
+ /* Clear debugging registers. */
+
+#if XCHAL_HAVE_DEBUG
+#if XCHAL_NUM_IBREAK > 0
+ wsr a0, ibreakenable
+#endif
+ wsr a0, icount
+ movi a1, 15
+ wsr a0, icountlevel
+
+ .set _index, 0
+ .rept XCHAL_NUM_DBREAK
+ wsr a0, SREG_DBREAKC + _index
+ .set _index, _index + 1
+ .endr
+#endif
+
+ /* Clear CCOUNT (not really necessary, but nice) */
+
+ wsr a0, ccount # not really necessary, but nice
+
+ /* Disable zero-loops. */
+
+#if XCHAL_HAVE_LOOPS
+ wsr a0, lcount
+#endif
+
+ /* Disable all timers. */
+
+ .set _index, 0
+ .rept XCHAL_NUM_TIMERS
+ wsr a0, SREG_CCOMPARE + _index
+ .set _index, _index + 1
+ .endr
+
+ /* Interrupt initialization. */
+
+ movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
+ wsr a0, intenable
+ wsr a2, intclear
+
+ /* Disable coprocessors. */
+
+#if XCHAL_HAVE_CP
+ wsr a0, cpenable
+#endif
+
+ /* Initialize the caches.
+ * a2, a3 are just working registers (clobbered).
+ */
+
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ ___unlock_dcache_all a2 a3
+#endif
+
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ ___unlock_icache_all a2 a3
+#endif
+
+ ___invalidate_dcache_all a2 a3
+ ___invalidate_icache_all a2 a3
+
+ isync
+
+ initialize_cacheattr
+
+#ifdef CONFIG_HAVE_SMP
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 1
+ wer a3, a2
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+
+ movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+ # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+ wsr a2, ps # (enable reg-windows; progmode stack)
+ rsync
+
+#ifdef CONFIG_SMP
+ /*
+ * Note: with SMP we assume that the cores support the PRID
+ * register.
+ */
+ rsr a2, prid
+ bnez a2, .Lboot_secondary
+
+#endif /* CONFIG_SMP */
+
+ /* Unpack data sections
+ *
+ * The linker script used to build the Linux kernel image
+ * creates a table located at __boot_reloc_table_start
+ * that contains information about what data needs to be unpacked.
+ *
+ * Uses a2-a7.
+ */
+
+ movi a2, __boot_reloc_table_start
+ movi a3, __boot_reloc_table_end
+
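+ /* Each table entry is three words: destination start, destination
+ * end, and source start; hence the 0/4/8 offsets and the 12-byte
+ * stride below.
+ */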
+1: beq a2, a3, 3f # no more entries?
+ l32i a4, a2, 0 # start destination (in RAM)
+ l32i a5, a2, 4 # end destination (in RAM)
+ l32i a6, a2, 8 # start source (in ROM)
+ addi a2, a2, 12 # next entry
+ beq a4, a5, 1b # skip, empty entry
+ beq a4, a6, 1b # skip, source and dest. are the same
+
+2: l32i a7, a6, 0 # load word
+ addi a6, a6, 4
+ s32i a7, a4, 0 # store word
+ addi a4, a4, 4
+ bltu a4, a5, 2b
+ j 1b
+
+3:
+ /* All code and initialized data segments have been copied.
+ * Now clear the BSS segment.
+ */
+
+ movi a2, __bss_start # start of BSS
+ movi a3, __bss_stop # end of BSS
+
+ __loopt a2, a3, a4, 2
+ s32i a0, a2, 0
+ __endla a2, a3, 4
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+
+ /* After unpacking, flush the writeback cache to memory so the
+ * instructions/data are available.
+ */
+
+ ___flush_dcache_all a2 a3
+#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
+
+ movi a6, 0
+ xsr a6, excsave1
+
+ /* init_arch kick-starts the linux kernel */
+
+ call4 init_arch
+ call4 start_kernel
+
+should_never_return:
+ j should_never_return
+
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
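+ /* Handshake with the boot CPU over cpu_start_ccount: wait for a
+ * posted value and acknowledge it by clearing the word, then load the
+ * second posted value into CCOUNT so both cores share a time base.
+ */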
+ movi a2, cpu_start_ccount
+1:
+ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+1:
+ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+
+ movi a6, 0
+ wsr a6, excsave1
+
+ call4 secondary_start_kernel
+ j should_never_return
+
+#endif /* CONFIG_SMP */
+
+ENDPROC(_startup)
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ ___flush_invalidate_dcache_all a2 a3
+#else
+ ___invalidate_dcache_all a2 a3
+#endif
+ memw
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 0
+ wer a3, a2
+ extw
+
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
+ memw
+ s32i a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+#endif
+1:
+ memw
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ j _startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+ .section ".data.init.refok"
+ .align 4
+ENTRY(start_info)
+ .long init_thread_union + KERNEL_STACK_SIZE
+
+/*
+ * BSS section
+ */
+
+__PAGE_ALIGNED_BSS
+#ifdef CONFIG_MMU
+ENTRY(swapper_pg_dir)
+ .fill PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
+#endif
+ENTRY(empty_zero_page)
+ .fill PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
new file mode 100644
index 000000000..c2e387c19
--- /dev/null
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -0,0 +1,307 @@
+/*
+ * Xtensa hardware breakpoints/watchpoints handling functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/log2.h>
+#include <linux/percpu.h>
+#include <linux/perf_event.h>
+#include <variant/core.h>
+
+/* Breakpoint currently in use for each IBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
+
+/* Watchpoint currently in use for each DBREAKA. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);
+
+int hw_breakpoint_slots(int type)
+{
+ switch (type) {
+ case TYPE_INST:
+ return XCHAL_NUM_IBREAK;
+ case TYPE_DATA:
+ return XCHAL_NUM_DBREAK;
+ default:
+ pr_warn("unknown slot type: %d\n", type);
+ return 0;
+ }
+}
+
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+ unsigned int len;
+ unsigned long va;
+
+ va = hw->address;
+ len = hw->len;
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ /* Type */
+ switch (attr->bp_type) {
+ case HW_BREAKPOINT_X:
+ hw->type = XTENSA_BREAKPOINT_EXECUTE;
+ break;
+ case HW_BREAKPOINT_R:
+ hw->type = XTENSA_BREAKPOINT_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ hw->type = XTENSA_BREAKPOINT_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Len */
+ hw->len = attr->bp_len;
+ if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
+ return -EINVAL;
+
+ /* Address */
+ hw->address = attr->bp_addr;
+ if (hw->address & (hw->len - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+static void xtensa_wsr(unsigned long v, u8 sr)
+{
+ /* We don't have an indexed wsr, and creating instructions dynamically
+ * doesn't seem worth it given how small XCHAL_NUM_IBREAK and
+ * XCHAL_NUM_DBREAK are. Thus the switch. If the build breaks here,
+ * the switch below needs to be extended.
+ */
+ BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
+ BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);
+
+ switch (sr) {
+#if XCHAL_NUM_IBREAK > 0
+ case SREG_IBREAKA + 0:
+ WSR(v, SREG_IBREAKA + 0);
+ break;
+#endif
+#if XCHAL_NUM_IBREAK > 1
+ case SREG_IBREAKA + 1:
+ WSR(v, SREG_IBREAKA + 1);
+ break;
+#endif
+
+#if XCHAL_NUM_DBREAK > 0
+ case SREG_DBREAKA + 0:
+ WSR(v, SREG_DBREAKA + 0);
+ break;
+ case SREG_DBREAKC + 0:
+ WSR(v, SREG_DBREAKC + 0);
+ break;
+#endif
+#if XCHAL_NUM_DBREAK > 1
+ case SREG_DBREAKA + 1:
+ WSR(v, SREG_DBREAKA + 1);
+ break;
+
+ case SREG_DBREAKC + 1:
+ WSR(v, SREG_DBREAKC + 1);
+ break;
+#endif
+ }
+}
+
+static int alloc_slot(struct perf_event **slot, size_t n,
+ struct perf_event *bp)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ if (!slot[i]) {
+ slot[i] = bp;
+ return i;
+ }
+ }
+ return -EBUSY;
+}
+
+static void set_ibreak_regs(int reg, struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long ibreakenable;
+
+ xtensa_wsr(info->address, SREG_IBREAKA + reg);
+ RSR(ibreakenable, SREG_IBREAKENABLE);
+ WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
+}
+
+static void set_dbreak_regs(int reg, struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;
+
+ if (info->type & XTENSA_BREAKPOINT_LOAD)
+ dbreakc |= DBREAKC_LOAD_MASK;
+ if (info->type & XTENSA_BREAKPOINT_STORE)
+ dbreakc |= DBREAKC_STOR_MASK;
+
+ xtensa_wsr(info->address, SREG_DBREAKA + reg);
+ xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
+}
+
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ int i;
+
+ if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+ if (i < 0)
+ return i;
+ set_ibreak_regs(i, bp);
+
+ } else {
+ /* Watchpoint */
+ i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+ if (i < 0)
+ return i;
+ set_dbreak_regs(i, bp);
+ }
+ return 0;
+}
+
+static int free_slot(struct perf_event **slot, size_t n,
+ struct perf_event *bp)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ if (slot[i] == bp) {
+ slot[i] = NULL;
+ return i;
+ }
+ }
+ return -EBUSY;
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
+ unsigned long ibreakenable;
+
+ /* Breakpoint */
+ i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
+ if (i >= 0) {
+ RSR(ibreakenable, SREG_IBREAKENABLE);
+ WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
+ }
+ } else {
+ /* Watchpoint */
+ i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
+ if (i >= 0)
+ xtensa_wsr(0, SREG_DBREAKC + i);
+ }
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+ if (t->ptrace_bp[i]) {
+ unregister_hw_breakpoint(t->ptrace_bp[i]);
+ t->ptrace_bp[i] = NULL;
+ }
+ }
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+ if (t->ptrace_wp[i]) {
+ unregister_hw_breakpoint(t->ptrace_wp[i]);
+ t->ptrace_wp[i] = NULL;
+ }
+ }
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
+ memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
+}
+
+void restore_dbreak(void)
+{
+ int i;
+
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
+ struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];
+
+ if (bp)
+ set_dbreak_regs(i, bp);
+ }
+ clear_thread_flag(TIF_DB_DISABLED);
+}
+
+int check_hw_breakpoint(struct pt_regs *regs)
+{
+ if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
+ int i;
+ struct perf_event **bp = this_cpu_ptr(bp_on_reg);
+
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
+ if (bp[i] && !bp[i]->attr.disabled &&
+ regs->pc == bp[i]->attr.bp_addr)
+ perf_bp_event(bp[i], regs);
+ }
+ return 0;
+ } else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
+ struct perf_event **bp = this_cpu_ptr(wp_on_reg);
+ int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
+ DEBUGCAUSE_DBNUM_SHIFT;
+
+ if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
+ if (user_mode(regs)) {
+ perf_bp_event(bp[dbnum], regs);
+ } else {
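+ /* The watchpoint fired in kernel mode, where it cannot be
+ * delivered; disarm it and let restore_dbreak() re-arm it
+ * later.
+ */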
+ set_thread_flag(TIF_DB_DISABLED);
+ xtensa_wsr(0, SREG_DBREAKC + dbnum);
+ }
+ } else {
+ WARN_ONCE(1,
+ "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
+ dbnum);
+ }
+ return 0;
+ }
+ return -ENOENT;
+}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 000000000..80cc9770a
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/xtensa/kernel/irq.c
+ *
+ * Xtensa built-in interrupt controller and some generic functions copied
+ * from i386.
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+
+#include <asm/mxregs.h>
+#include <linux/uaccess.h>
+#include <asm/platform.h>
+
+DECLARE_PER_CPU(unsigned long, nmi_count);
+
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
+{
+ int irq = irq_find_mapping(NULL, hwirq);
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ {
+ unsigned long sp;
+
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
+ sp &= THREAD_SIZE - 1;
+
+ if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
+ printk("Stack overflow in do_IRQ: %ld\n",
+ sp - sizeof(struct thread_info));
+ }
+#endif
+ generic_handle_irq(irq);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ unsigned cpu __maybe_unused;
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
+#if XTENSA_FAKE_NMI
+ seq_printf(p, "%*s:", prec, "NMI");
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+ seq_puts(p, " Non-maskable interrupts\n");
+#endif
+ return 0;
+}
+
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ int_irq = xtensa_map_ext_irq(ext_irq);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = int_irq;
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *irq_chip = d->host_data;
+ u32 mask = 1 << hw;
+
+ if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_simple_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "timer");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+#ifdef XCHAL_INTTYPE_MASK_PROFILING
+ } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "profiling");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+#endif
+ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+ /* XCHAL_INTTYPE_MASK_NMI */
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
+{
+ unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+ unsigned i;
+
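+ /* Scan the external-interrupt type mask: the ext_irq'th set bit is
+ * the internal interrupt number (e.g. with mask 0b10110, ext_irq 1
+ * maps to interrupt 2).
+ */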
+ for (i = 0; mask; ++i, mask >>= 1) {
+ if ((mask & 1) && ext_irq-- == 0)
+ return i;
+ }
+ return XCHAL_NUM_INTERRUPTS;
+}
+
+unsigned xtensa_get_ext_irq_no(unsigned irq)
+{
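+ /* Inverse of xtensa_map_ext_irq(): the external number of 'irq' is
+ * the count of external-interrupt bits below it.
+ */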
+ unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+ ((1u << irq) - 1);
+ return hweight32(mask);
+}
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_USE_OF
+ irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+ xtensa_mx_init_legacy(NULL);
+#else
+ xtensa_pic_init_legacy(NULL);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+ ipi_init();
+#endif
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i, cpu = smp_processor_id();
+
+ for_each_active_irq(i) {
+ struct irq_data *data = irq_get_irq_data(i);
+ struct cpumask *mask;
+ unsigned int newcpu;
+
+ if (irqd_is_per_cpu(data))
+ continue;
+
+ mask = irq_data_get_affinity_mask(data);
+ if (!cpumask_test_cpu(cpu, mask))
+ continue;
+
+ newcpu = cpumask_any_and(mask, cpu_online_mask);
+
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
+
+ cpumask_setall(mask);
+ }
+ irq_set_affinity(i, mask);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S
new file mode 100644
index 000000000..0eeda2e4a
--- /dev/null
+++ b/arch/xtensa/kernel/mcount.S
@@ -0,0 +1,50 @@
+/*
+ * arch/xtensa/kernel/mcount.S
+ *
+ * Xtensa specific mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+/*
+ * Entry condition:
+ *
+ * a2: a0 of the caller
+ */
+
+ENTRY(_mcount)
+
+ entry a1, 16
+
+ movi a4, ftrace_trace_function
+ l32i a4, a4, 0
+ movi a3, ftrace_stub
+ bne a3, a4, 1f
+ retw
+
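+ /* The top two bits of a windowed return address encode the caller's
+ * window increment rather than address bits; splice in the top bits
+ * of the stack pointer to recover the real kernel addresses of the
+ * instrumented function and its parent.
+ */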
+1: xor a7, a2, a1
+ movi a3, 0x3fffffff
+ and a7, a7, a3
+ xor a7, a7, a1
+
+ xor a6, a0, a1
+ and a6, a6, a3
+ xor a6, a6, a1
+ addi a6, a6, -MCOUNT_INSN_SIZE
+ callx4 a4
+
+ retw
+
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+ entry a1, 16
+ retw
+ENDPROC(ftrace_stub)
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 000000000..902845dda
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,189 @@
+/*
+ * arch/xtensa/kernel/module.c
+ *
+ * Module support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+static int
+decode_calln_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x5;
+#endif
+}
+
+static int
+decode_l32r_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+ return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+ return (location[0] & 0xf) == 0x1;
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ unsigned char *location;
+ uint32_t value;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset;
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info);
+ value = sym->st_value + rela[i].r_addend;
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_XTENSA_NONE:
+ case R_XTENSA_DIFF8:
+ case R_XTENSA_DIFF16:
+ case R_XTENSA_DIFF32:
+ case R_XTENSA_ASM_EXPAND:
+ break;
+
+ case R_XTENSA_32:
+ case R_XTENSA_PLT:
+ *(uint32_t *)location += value;
+ break;
+
+ case R_XTENSA_SLOT0_OP:
+ if (decode_calln_opcode(location)) {
+ value -= ((unsigned long)location & -4) + 4;
+ if ((value & 3) != 0 ||
+ ((value + (1 << 19)) >> 20) != 0) {
+ pr_err("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+ location[0] = ((location[0] & ~0x3) |
+ ((value >> 16) & 0x3));
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[0] = ((location[0] & ~0xc0) |
+ ((value << 6) & 0xc0));
+ location[1] = (value >> 2) & 0xff;
+ location[2] = (value >> 10) & 0xff;
+#endif
+ } else if (decode_l32r_opcode(location)) {
+ value -= (((unsigned long)location + 3) & -4);
+ if ((value & 3) != 0 ||
+ (signed int)value >> 18 != -1) {
+ pr_err("%s: relocation out of range, "
+ "section %d reloc %d "
+ "sym '%s'\n",
+ mod->name, relsec, i,
+ strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+ location[1] = (value >> 8) & 0xff;
+ location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+ location[1] = value & 0xff;
+ location[2] = (value >> 8) & 0xff;
+#endif
+ }
+ /* FIXME: Ignore any other opcodes. The Xtensa
+ assembler currently assumes that the linker will
+ always do relaxation and so all PC-relative
+ operands need relocations. (The assembler also
+ writes out the tentative PC-relative values,
+ assuming no link-time relaxation, so it is usually
+ safe to ignore the relocations.) If the
+ assembler's "--no-link-relax" flag can be made to
+ work, and if all kernel modules can be assembled
+ with that flag, then unexpected relocations could
+ be detected here. */
+ break;
+
+ case R_XTENSA_SLOT1_OP:
+ case R_XTENSA_SLOT2_OP:
+ case R_XTENSA_SLOT3_OP:
+ case R_XTENSA_SLOT4_OP:
+ case R_XTENSA_SLOT5_OP:
+ case R_XTENSA_SLOT6_OP:
+ case R_XTENSA_SLOT7_OP:
+ case R_XTENSA_SLOT8_OP:
+ case R_XTENSA_SLOT9_OP:
+ case R_XTENSA_SLOT10_OP:
+ case R_XTENSA_SLOT11_OP:
+ case R_XTENSA_SLOT12_OP:
+ case R_XTENSA_SLOT13_OP:
+ case R_XTENSA_SLOT14_OP:
+ pr_err("%s: unexpected FLIX relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ case R_XTENSA_SLOT0_ALT:
+ case R_XTENSA_SLOT1_ALT:
+ case R_XTENSA_SLOT2_ALT:
+ case R_XTENSA_SLOT3_ALT:
+ case R_XTENSA_SLOT4_ALT:
+ case R_XTENSA_SLOT5_ALT:
+ case R_XTENSA_SLOT6_ALT:
+ case R_XTENSA_SLOT7_ALT:
+ case R_XTENSA_SLOT8_ALT:
+ case R_XTENSA_SLOT9_ALT:
+ case R_XTENSA_SLOT10_ALT:
+ case R_XTENSA_SLOT11_ALT:
+ case R_XTENSA_SLOT12_ALT:
+ case R_XTENSA_SLOT13_ALT:
+ case R_XTENSA_SLOT14_ALT:
+ pr_err("%s: unexpected ALT relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+
+ default:
+ pr_err("%s: unexpected relocation: %u\n",
+ mod->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 000000000..9f3843742
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,62 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+ .section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+ _j _SetupOCD
+
+ .begin no-absolute-literals
+ .literal_position
+
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+_SetupMMU:
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ /*
+ * Start Secondary Processors with NULL pointer to boot params.
+ */
+ movi a2, 0 # a2 == NULL
+ movi a3, _startup
+ jx a3
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 000000000..1fc138b6b
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,212 @@
+/*
+ * DMA coherent memory allocation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * Based on version for i386.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-direct.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/platform.h>
+
+static void do_cache_op(phys_addr_t paddr, size_t size,
+ void (*fn)(unsigned long, unsigned long))
+{
+ unsigned long off = paddr & (PAGE_SIZE - 1);
+ unsigned long pfn = PFN_DOWN(paddr);
+ struct page *page = pfn_to_page(pfn);
+
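+ /* Lowmem pages are permanently mapped, so operate on the kernel
+ * virtual address directly; highmem must be kmapped and processed
+ * one page at a time.
+ */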
+ if (!PageHighMem(page))
+ fn((unsigned long)phys_to_virt(paddr), size);
+ else
+ while (size > 0) {
+ size_t sz = min_t(size_t, size, PAGE_SIZE - off);
+ void *vaddr = kmap_atomic(page);
+
+ fn((unsigned long)vaddr + off, sz);
+ kunmap_atomic(vaddr);
+ off = 0;
+ ++page;
+ size -= sz;
+ }
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ do_cache_op(paddr, size, __invalidate_dcache_range);
+ break;
+
+ case DMA_NONE:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_TO_DEVICE:
+ if (XCHAL_DCACHE_IS_WRITEBACK)
+ do_cache_op(paddr, size, __flush_dcache_range);
+ break;
+
+ case DMA_NONE:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_MMU
+bool platform_vaddr_cached(const void *p)
+{
+ unsigned long addr = (unsigned long)p;
+
+ return addr >= XCHAL_KSEG_CACHED_VADDR &&
+ addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
+}
+
+bool platform_vaddr_uncached(const void *p)
+{
+ unsigned long addr = (unsigned long)p;
+
+ return addr >= XCHAL_KSEG_BYPASS_VADDR &&
+ addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
+}
+
+void *platform_vaddr_to_uncached(void *p)
+{
+ return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+}
+
+void *platform_vaddr_to_cached(void *p)
+{
+ return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+}
+#else
+bool __attribute__((weak)) platform_vaddr_cached(const void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return true;
+}
+
+bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return false;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return p;
+}
+
+void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
+{
+ WARN_ONCE(1, "Default %s implementation is used\n", __func__);
+ return p;
+}
+#endif
+
+/*
+ * Note: We assume that the full memory space is always mapped to 'kseg'
+ * Otherwise we have to use page attributes (not implemented).
+ */
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t flag, unsigned long attrs)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = NULL;
+
+ /* ignore region specifiers */
+
+ flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ flag |= GFP_DMA;
+
+ if (gfpflags_allow_blocking(flag))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size),
+ flag & __GFP_NOWARN);
+
+ if (!page)
+ page = alloc_pages(flag, get_order(size));
+
+ if (!page)
+ return NULL;
+
+ *handle = phys_to_dma(dev, page_to_phys(page));
+
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ return page;
+ }
+
+#ifdef CONFIG_MMU
+ if (PageHighMem(page)) {
+ void *p;
+
+ p = dma_common_contiguous_remap(page, size, VM_MAP,
+ pgprot_noncached(PAGE_KERNEL),
+ __builtin_return_address(0));
+ if (!p) {
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
+ }
+ return p;
+ }
+#endif
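+ /* Lowmem: drop any stale cache lines for the buffer, then hand out
+ * its address through the uncached (bypass) kseg alias.
+ */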
+ BUG_ON(!platform_vaddr_cached(page_address(page)));
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
+ return platform_vaddr_to_uncached(page_address(page));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page;
+
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ page = vaddr;
+ } else if (platform_vaddr_uncached(vaddr)) {
+ page = virt_to_page(platform_vaddr_to_cached(vaddr));
+ } else {
+#ifdef CONFIG_MMU
+ dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+ page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+ }
+
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
+}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 000000000..21f13e9aa
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,216 @@
+/*
+ * arch/xtensa/kernel/pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ *
+ * Based largely on work from Cort (ppc/kernel/pci.c)
+ * IO functions copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/platform.h>
+
+/* PCI Controller */
+
+
+/*
+ * pcibios_alloc_controller
+ * pcibios_enable_device
+ * pcibios_fixups
+ * pcibios_align_resource
+ * pcibios_fixup_bus
+ * pci_bus_add_device
+ */
+
+static struct pci_controller *pci_ctrl_head;
+static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
+
+static int pci_bus_count;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ resource_size_t start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ if (size > 0x100) {
+ pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
+ pci_name(dev), dev->resource - res,
+ size);
+ }
+
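+ /* Round up out of the mirrored range, e.g. a request at 0x2900 is
+ * pushed to 0x2c00 by the alignment below.
+ */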
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+
+ return start;
+}
+
+static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
+ struct list_head *resources)
+{
+ struct resource *res;
+ unsigned long io_offset;
+ int i;
+
+ io_offset = (unsigned long)pci_ctrl->io_space.base;
+ res = &pci_ctrl->io_resource;
+ if (!res->flags) {
+ if (io_offset)
+ pr_err("I/O resource not set for host bridge %d\n",
+ pci_ctrl->index);
+ res->start = 0;
+ res->end = IO_SPACE_LIMIT;
+ res->flags = IORESOURCE_IO;
+ }
+ res->start += io_offset;
+ res->end += io_offset;
+ pci_add_resource_offset(resources, res, io_offset);
+
+ for (i = 0; i < 3; i++) {
+ res = &pci_ctrl->mem_resources[i];
+ if (!res->flags) {
+ if (i > 0)
+ continue;
+ pr_err("Memory resource not set for host bridge %d\n",
+ pci_ctrl->index);
+ res->start = 0;
+ res->end = ~0U;
+ res->flags = IORESOURCE_MEM;
+ }
+ pci_add_resource(resources, res);
+ }
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *pci_ctrl;
+ struct list_head resources;
+ struct pci_bus *bus;
+ int next_busno = 0, ret;
+
+ pr_info("PCI: Probing PCI hardware\n");
+
+ /* Scan all of the recorded PCI controllers. */
+ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+ pci_ctrl->last_busno = 0xff;
+ INIT_LIST_HEAD(&resources);
+ pci_controller_apertures(pci_ctrl, &resources);
+ bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
+ pci_ctrl->ops, pci_ctrl, &resources);
+ if (!bus)
+ continue;
+
+ pci_ctrl->bus = bus;
+ pci_ctrl->last_busno = bus->busn_res.end;
+ if (next_busno <= pci_ctrl->last_busno)
+ next_busno = pci_ctrl->last_busno+1;
+ }
+ pci_bus_count = next_busno;
+ ret = platform_pcibios_fixup();
+ if (ret)
+ return ret;
+
+ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+ if (pci_ctrl->bus)
+ pci_bus_add_devices(pci_ctrl->bus);
+ }
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ if (bus->parent) {
+ /* This is a subordinate bridge */
+ pci_read_bridge_bases(bus);
+ }
+}
+
+void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx = 0; idx < 6; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end) {
+ pci_err(dev, "can't enable device: resource collisions\n");
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+
+ return 0;
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
+ * -- paulus.
+ */
+
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
+{
+ struct pci_controller *pci_ctrl = (struct pci_controller *)pdev->sysdata;
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
+
+ if (!pci_ctrl)
+ return -EINVAL; /* should never happen */
+
+ /* Convert to an offset within this PCI controller */
+ ioaddr -= (unsigned long)pci_ctrl->io_space.base;
+
+ vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT;
+ return 0;
+}
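A numeric sketch of the vm_pgoff arithmetic above, with made-up addresses (io_space.base, io_space.start and the BAR start are all hypothetical, and 4 KiB pages are assumed): if the controller's I/O window is CPU-visible at io_space.base == 0xfd000000, sits at io_space.start == 0xf0000000, and the BAR begins 0x1000 bytes into the window, then:

    unsigned long ioaddr = 0xfd001000 - 0xfd000000;      /* 0x1000  */
    /* with 4 KiB pages (PAGE_SHIFT == 12): */
    unsigned long pgoff  = (0x1000 + 0xf0000000) >> 12;  /* 0xf0001 */

so the mmap() offset addresses the BAR within the controller's I/O space rather than within the CPU-visible window.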
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
new file mode 100644
index 000000000..768e1f7ab
--- /dev/null
+++ b/arch/xtensa/kernel/perf_event.c
@@ -0,0 +1,446 @@
+/*
+ * Xtensa Performance Monitor Module driver
+ * See Tensilica Debug User's Guide for PMU registers documentation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+
+/* Global control/status for all perf counters */
+#define XTENSA_PMU_PMG 0x1000
+/* Perf counter values */
+#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4)
+/* Perf counter control registers */
+#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4)
+/* Perf counter status registers */
+#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4)
+
+#define XTENSA_PMU_PMG_PMEN 0x1
+
+#define XTENSA_PMU_COUNTER_MASK 0xffffffffULL
+#define XTENSA_PMU_COUNTER_MAX 0x7fffffff
+
+#define XTENSA_PMU_PMCTRL_INTEN 0x00000001
+#define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008
+#define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0
+#define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8
+#define XTENSA_PMU_PMCTRL_SELECT 0x00001f00
+#define XTENSA_PMU_PMCTRL_MASK_SHIFT 16
+#define XTENSA_PMU_PMCTRL_MASK 0xffff0000
+
+#define XTENSA_PMU_MASK(select, mask) \
+ (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
+ ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
+ XTENSA_PMU_PMCTRL_TRACELEVEL | \
+ XTENSA_PMU_PMCTRL_INTEN)
+
+#define XTENSA_PMU_PMSTAT_OVFL 0x00000001
+#define XTENSA_PMU_PMSTAT_INTASRT 0x00000010
+
+struct xtensa_pmu_events {
+ /* Array of events currently on this core */
+ struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
+ /* Bitmap of used hardware counters */
+ unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
+};
+static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
+
+static const u32 xtensa_hw_ctl[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1),
+ [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff),
+ [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1),
+ [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1),
+ /* Taken and non-taken branches + taken loop ends */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490),
+ /* Instruction-related + other global stall cycles */
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff),
+ /* Data-related global stall cycles */
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff),
+};
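To see how one of these entries is put together, expanding XTENSA_PMU_MASK(2, 0x490) (the branch-instructions entry) field by field with the PMCTRL masks defined above:

    /* (2 << XTENSA_PMU_PMCTRL_SELECT_SHIFT)   = 0x00000200
     * (0x490 << XTENSA_PMU_PMCTRL_MASK_SHIFT) = 0x04900000
     * XTENSA_PMU_PMCTRL_TRACELEVEL            = 0x000000f0
     * XTENSA_PMU_PMCTRL_INTEN                 = 0x00000001
     * OR'ed together                          = 0x049002f1
     */

i.e. event group 2 with sub-event mask 0x490, counted at all trace levels, with the overflow interrupt enabled.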
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2),
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2),
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2),
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8),
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1),
+ [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8),
+ },
+ },
+};
+
+static int xtensa_pmu_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ int ret;
+
+ cache_type = (config >> 0) & 0xff;
+ cache_op = (config >> 8) & 0xff;
+ cache_result = (config >> 16) & 0xff;
+
+ if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) ||
+ cache_op >= C(OP_MAX) ||
+ cache_result >= C(RESULT_MAX))
+ return -EINVAL;
+
+ ret = xtensa_cache_ctl[cache_type][cache_op][cache_result];
+
+ if (ret == 0)
+ return -EINVAL;
+
+ return ret;
+}
+
+static inline uint32_t xtensa_pmu_read_counter(int idx)
+{
+ return get_er(XTENSA_PMU_PM(idx));
+}
+
+static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
+{
+ set_er(v, XTENSA_PMU_PM(idx));
+}
+
+static void xtensa_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ uint64_t prev_raw_count, new_raw_count;
+ int64_t delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
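The masking with XTENSA_PMU_COUNTER_MASK is what keeps the delta correct across a 32-bit counter wrap; a tiny sketch with hypothetical raw readings:

    uint64_t prev  = 0xfffffff0;                   /* last snapshot        */
    uint64_t cur   = 0x00000010;                   /* counter wrapped once */
    int64_t  delta = (cur - prev) & 0xffffffffULL; /* == 0x20              */

Without the mask, the unsigned subtraction would leave the wrap's borrow in the high 32 bits and the accumulated event count would be wildly wrong.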
+
+static bool xtensa_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ bool rc = false;
+ s64 left;
+
+ if (!is_sampling_event(event)) {
+ left = XTENSA_PMU_COUNTER_MAX;
+ } else {
+ s64 period = hwc->sample_period;
+
+ left = local64_read(&hwc->period_left);
+ if (left <= -period) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ rc = true;
+ } else if (left <= 0) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ rc = true;
+ }
+ if (left > XTENSA_PMU_COUNTER_MAX)
+ left = XTENSA_PMU_COUNTER_MAX;
+ }
+
+ local64_set(&hwc->prev_count, -left);
+ xtensa_pmu_write_counter(idx, -left);
+ perf_event_update_userpage(event);
+
+ return rc;
+}
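Writing -left into the counter is the usual PMU idiom: assuming the hardware flags overflow when the counter wraps past 32 bits (which is what the OVFL handling in the IRQ handler below relies on), the counter reaches its overflow condition after exactly `left` further events. A sketch with a hypothetical period:

    uint32_t left  = 1000;
    uint32_t start = (uint32_t)-left;   /* 0xfffffc18 */

    /* After 1000 increments the counter wraps:
     * 0xfffffc18 + 1000 == 0x1'00000000, truncated to 0,
     * at which point the PMU raises its overflow status. */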
+
+static void xtensa_pmu_enable(struct pmu *pmu)
+{
+ set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static void xtensa_pmu_disable(struct pmu *pmu)
+{
+ set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static int xtensa_pmu_event_init(struct perf_event *event)
+{
+ int ret;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
+ xtensa_hw_ctl[event->attr.config] == 0)
+ return -EINVAL;
+ event->hw.config = xtensa_hw_ctl[event->attr.config];
+ return 0;
+
+ case PERF_TYPE_HW_CACHE:
+ ret = xtensa_pmu_cache_event(event->attr.config);
+ if (ret < 0)
+ return ret;
+ event->hw.config = ret;
+ return 0;
+
+ case PERF_TYPE_RAW:
+ /* Reject the 'previous counter' select value */
+ if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
+ (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
+ return -EINVAL;
+ event->hw.config = (event->attr.config &
+ (XTENSA_PMU_PMCTRL_KRNLCNT |
+ XTENSA_PMU_PMCTRL_TRACELEVEL |
+ XTENSA_PMU_PMCTRL_SELECT |
+ XTENSA_PMU_PMCTRL_MASK)) |
+ XTENSA_PMU_PMCTRL_INTEN;
+ return 0;
+
+ default:
+ return -ENOENT;
+ }
+}
+
+/*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+static void xtensa_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ xtensa_perf_event_set_period(event, hwc, idx);
+ }
+
+ hwc->state = 0;
+
+ set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
+}
+
+static void xtensa_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ set_er(0, XTENSA_PMU_PMCTRL(idx));
+ set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
+ XTENSA_PMU_PMSTAT(idx));
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) &&
+ !(event->hw.state & PERF_HES_UPTODATE)) {
+ xtensa_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+/*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+static int xtensa_pmu_add(struct perf_event *event, int flags)
+{
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (__test_and_set_bit(idx, ev->used_mask)) {
+ idx = find_first_zero_bit(ev->used_mask,
+ XCHAL_NUM_PERF_COUNTERS);
+ if (idx == XCHAL_NUM_PERF_COUNTERS)
+ return -EAGAIN;
+
+ __set_bit(idx, ev->used_mask);
+ hwc->idx = idx;
+ }
+ ev->event[idx] = event;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ xtensa_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void xtensa_pmu_del(struct perf_event *event, int flags)
+{
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+
+ xtensa_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, ev->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void xtensa_pmu_read(struct perf_event *event)
+{
+ xtensa_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+ struct perf_callchain_entry_ctx *entry = data;
+
+ perf_callchain_store(entry, frame->pc);
+ return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ xtensa_backtrace_kernel(regs, entry->max_stack,
+ callchain_trace, NULL, entry);
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ xtensa_backtrace_user(regs, entry->max_stack,
+ callchain_trace, entry);
+}
+
+void perf_event_print_debug(void)
+{
+ unsigned long flags;
+ unsigned i;
+
+ local_irq_save(flags);
+ pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
+ get_er(XTENSA_PMU_PMG));
+ for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
+ pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
+ i, get_er(XTENSA_PMU_PM(i)),
+ i, get_er(XTENSA_PMU_PMCTRL(i)),
+ i, get_er(XTENSA_PMU_PMSTAT(i)));
+ local_irq_restore(flags);
+}
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t rc = IRQ_NONE;
+ struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+ unsigned i;
+
+ for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
+ i < XCHAL_NUM_PERF_COUNTERS;
+ i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+ uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
+ struct perf_event *event = ev->event[i];
+ struct hw_perf_event *hwc = &event->hw;
+ u64 last_period;
+
+ if (!(v & XTENSA_PMU_PMSTAT_OVFL))
+ continue;
+
+ set_er(v, XTENSA_PMU_PMSTAT(i));
+ xtensa_perf_event_update(event, hwc, i);
+ last_period = hwc->last_period;
+ if (xtensa_perf_event_set_period(event, hwc, i)) {
+ struct perf_sample_data data;
+ struct pt_regs *regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, last_period);
+ if (perf_event_overflow(event, &data, regs))
+ xtensa_pmu_stop(event, 0);
+ }
+
+ rc = IRQ_HANDLED;
+ }
+ return rc;
+}
+
+static struct pmu xtensa_pmu = {
+ .pmu_enable = xtensa_pmu_enable,
+ .pmu_disable = xtensa_pmu_disable,
+ .event_init = xtensa_pmu_event_init,
+ .add = xtensa_pmu_add,
+ .del = xtensa_pmu_del,
+ .start = xtensa_pmu_start,
+ .stop = xtensa_pmu_stop,
+ .read = xtensa_pmu_read,
+};
+
+static int xtensa_pmu_setup(unsigned int cpu)
+{
+ unsigned i;
+
+ set_er(0, XTENSA_PMU_PMG);
+ for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
+ set_er(0, XTENSA_PMU_PMCTRL(i));
+ set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
+ }
+ return 0;
+}
+
+static int __init xtensa_pmu_init(void)
+{
+ int ret;
+ int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+ "perf/xtensa:starting", xtensa_pmu_setup,
+ NULL);
+ if (ret) {
+ pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
+ return ret;
+ }
+#if XTENSA_FAKE_NMI
+ enable_irq(irq);
+#else
+ ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
+ "pmu", NULL);
+ if (ret < 0)
+ return ret;
+#endif
+
+ ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
+ if (ret)
+ free_irq(irq, NULL);
+
+ return ret;
+}
+early_initcall(xtensa_pmu_init);
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 000000000..1cf008284
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,46 @@
+/*
+ * arch/xtensa/kernel/platform.c
+ *
+ * Default platform functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <asm/platform.h>
+#include <asm/timex.h>
+#include <asm/param.h> /* HZ */
+
+#define _F(r,f,a,b) \
+ r __platform_##f a b; \
+ r platform_##f a __attribute__((weak, alias("__platform_"#f)))
+
+/*
+ * Default functions that are used if no platform-specific function is
+ * defined. (Please refer to asm/platform.h for more information.)
+ */
+
+_F(void, setup, (char** cmd), { });
+_F(void, restart, (void), { while(1); });
+_F(void, halt, (void), { while(1); });
+_F(void, power_off, (void), { while(1); });
+_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
+_F(void, heartbeat, (void), { });
+_F(int, pcibios_fixup, (void), { return 0; });
+_F(void, pcibios_init, (void), { });
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+_F(void, calibrate_ccount, (void),
+{
+ pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
+ ccount_freq = 10 * 1000000UL;
+});
+#endif
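To make the weak-alias trick concrete, this is approximately what one of the `_F()` lines above expands to (whitespace added for readability):

    /* _F(void, halt, (void), { while(1); }); becomes: */
    void __platform_halt(void) { while (1); };
    void platform_halt(void)
            __attribute__((weak, alias("__platform_halt")));

A platform that provides its own strong platform_halt() definition overrides the weak alias at link time; otherwise callers fall through to the default body.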
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 000000000..5a0e0bd68
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,373 @@
+/*
+ * arch/xtensa/kernel/process.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <asm/pgtable.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/mmu.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/asm-offsets.h>
+#include <asm/regs.h>
+#include <asm/hw_breakpoint.h>
+
+extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
+
+struct task_struct *current_set[NR_CPUS] = {&init_task, };
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+#if XTENSA_HAVE_COPROCESSORS
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+ unsigned long cpenable;
+ int i;
+
+ /* Make sure we don't switch tasks during this operation. */
+
+ preempt_disable();
+
+ /* Walk through all cp owners and release it for the requested one. */
+
+ cpenable = ti->cpenable;
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if (coprocessor_owner[i] == ti) {
+ coprocessor_owner[i] = 0;
+ cpenable &= ~(1 << i);
+ }
+ }
+
+ ti->cpenable = cpenable;
+ coprocessor_clear_cpenable();
+
+ preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+ unsigned long cpenable, old_cpenable;
+ int i;
+
+ preempt_disable();
+
+ RSR_CPENABLE(old_cpenable);
+ cpenable = ti->cpenable;
+ WSR_CPENABLE(cpenable);
+
+ for (i = 0; i < XCHAL_CP_MAX; i++) {
+ if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+ coprocessor_flush(ti, i);
+ cpenable >>= 1;
+ }
+ WSR_CPENABLE(old_cpenable);
+
+ preempt_enable();
+}
+
+#endif
+
+
+/*
+ * Power-management idle function, if one is provided by the platform.
+ */
+void arch_cpu_idle(void)
+{
+ platform_idle();
+}
+
+/*
+ * This is called when the thread calls exit().
+ */
+void exit_thread(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(task_thread_info(tsk));
+#endif
+}
+
+/*
+ * Flush thread state. This is called when a thread does an execve().
+ * Note that we flush the coprocessor registers in case execve() fails.
+ */
+void flush_thread(void)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ struct thread_info *ti = current_thread_info();
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+#endif
+ flush_ptrace_hw_breakpoint(current);
+}
+
+/*
+ * This gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(task_thread_info(src));
+#endif
+ *dst = *src;
+ return 0;
+}
+
+/*
+ * Copy thread.
+ *
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
+ * The stack layout for the new thread looks like this:
+ *
+ * +------------------------+
+ * | childregs |
+ * +------------------------+ <- thread.sp = sp in dummy-frame
+ * | dummy-frame | (saved in dummy-frame spill-area)
+ * +------------------------+
+ *
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
+ * sp points to itself (thread.sp)
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
+ *
+ * Note: This is a pristine frame, so we don't need any spill region on top of
+ * childregs.
+ *
+ * The fun part: if we're keeping the same VM (i.e. cloning a thread,
+ * not an entire process), we're normally given a new usp, and we CANNOT share
+ * any live address register windows. If we just copy those live frames over,
+ * the two threads (parent and child) will overflow the same frames onto the
+ * parent stack at different times, likely corrupting the parent stack (esp.
+ * if the parent returns from functions that called clone() and calls new
+ * ones, before the child overflows its now old copies of its parent windows).
+ * One solution is to spill windows to the parent stack, but that's fairly
+ * involved. Much simpler to just not copy those live frames across.
+ */
+
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg, struct task_struct *p)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ struct thread_info *ti;
+#endif
+
+ /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
+ SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
+ SPILL_SLOT(childregs, 0) = 0;
+
+ p->thread.sp = (unsigned long)childregs;
+
+ if (!(p->flags & PF_KTHREAD)) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
+
+ /* This does not copy all the regs.
+ * In a bout of brilliance or madness,
+ * ARs beyond a0-a15 exist past the end of the struct.
+ */
+ *childregs = *regs;
+ childregs->areg[1] = usp;
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ * usually starts on a pristine stack, so we have to reset
+ * windowbase, windowstart and wmask.
+ * (Note that such a new thread is required to always create
+ * an initial call4 frame.)
+ * The exception is vfork, where the new thread continues to
+ * run on the parent's stack until it calls execve. This could
+ * be a call8 or call12, which requires a legal stack frame
+ * of the previous caller for the overflow handlers to work.
+ * (Note that it's always legal to overflow live registers.)
+ * In this case, make sure to spill at least the stack pointer
+ * of that frame. */
+
+ if (clone_flags & CLONE_VM) {
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
+ } else {
+ int len = childregs->wmask & ~0xf;
+ memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+ &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+ }
+
+ /* The thread pointer is passed in the '4th argument' (= a5) */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->threadptr = childregs->areg[5];
+ } else {
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread:
+ * a2 = thread_fn, a3 = thread_fn arg
+ */
+ SPILL_SLOT(childregs, 3) = thread_fn_arg;
+ SPILL_SLOT(childregs, 2) = usp_thread_fn;
+
+ /* Childregs are only used when we're going to userspace
+ * in which case start_thread will set them up.
+ */
+ }
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ ti = task_thread_info(p);
+ ti->cpenable = 0;
+#endif
+
+ clear_ptrace_hw_breakpoint(p);
+
+ return 0;
+}
+
+
+/*
+ * These bracket the sleeping functions.
+ */
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long sp, pc;
+ unsigned long stack_page = (unsigned long) task_stack_page(p);
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ sp = p->thread.sp;
+ pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+
+ do {
+ if (sp < stack_page + sizeof(struct task_struct) ||
+ sp >= (stack_page + THREAD_SIZE) ||
+ pc == 0)
+ return 0;
+ if (!in_sched_functions(pc))
+ return pc;
+
+ /* Stack layout: sp-4: ra, sp-3: sp' */
+
+ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+ sp = SPILL_SLOT(sp, 1);
+ } while (count++ < 16);
+ return 0;
+}
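get_wchan() leans on MAKE_PC_FROM_RA() because, on the windowed ABI, a saved return address carries the caller's window increment in its top two bits rather than address bits; the full PC is recovered by borrowing those bits from the stack pointer. A sketch of the bit manipulation, assuming the usual definition from asm/processor.h:

    /* #define MAKE_PC_FROM_RA(ra, sp) \
     *         (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
     *
     * e.g. a return target of 0xd0001234 reached via call4
     * (window increment 1) is stored as 0x50001234; combining
     * with sp = 0xd03ff000 restores 0xd0001234. */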
+
+/*
+ * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
+ * of processor registers. Besides different ordering,
+ * xtensa_gregset_t contains non-live register information that
+ * 'struct pt_regs' does not. Exception handling (primarily) uses
+ * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
+ */
+
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
+{
+ unsigned long wb, ws, wm;
+ int live, last;
+
+ wb = regs->windowbase;
+ ws = regs->windowstart;
+ wm = regs->wmask;
+ ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
+
+ /* Don't leak any random bits. */
+
+ memset(elfregs, 0, sizeof(*elfregs));
+
+ /* Note: PS.EXCM is not set while user task is running; its
+ * being set in regs->ps is for exception handling convenience.
+ */
+
+ elfregs->pc = regs->pc;
+ elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
+ elfregs->lbeg = regs->lbeg;
+ elfregs->lend = regs->lend;
+ elfregs->lcount = regs->lcount;
+ elfregs->sar = regs->sar;
+ elfregs->windowstart = ws;
+
+ live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+ last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+ memcpy(elfregs->a, regs->areg, live * 4);
+ memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
+}
+
+int dump_fpu(void)
+{
+ return 0;
+}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 000000000..3cfcbdc45
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,517 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Scott Foehner <sfoehner@yahoo.com>,
+ * Kevin Chea
+ * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+
+#include <asm/coprocessor.h>
+#include <asm/elf.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+
+
+void user_enable_single_step(struct task_struct *child)
+{
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
+ */
+
+void ptrace_disable(struct task_struct *child)
+{
+ /* Nothing to do. */
+}
+
+static int ptrace_getregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t __user *gregset = uregs;
+ unsigned long wb = regs->windowbase;
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ __put_user(regs->pc, &gregset->pc);
+ __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+ __put_user(regs->lbeg, &gregset->lbeg);
+ __put_user(regs->lend, &gregset->lend);
+ __put_user(regs->lcount, &gregset->lcount);
+ __put_user(regs->windowstart, &gregset->windowstart);
+ __put_user(regs->windowbase, &gregset->windowbase);
+ __put_user(regs->threadptr, &gregset->threadptr);
+
+ for (i = 0; i < XCHAL_NUM_AREGS; i++)
+ __put_user(regs->areg[i],
+ gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
+
+ return 0;
+}
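The modulo in the loop above un-rotates the register window: pt_regs stores aregs relative to the live windowbase, while the gregset presents the physical AR file. A worked case, assuming a hypothetical 64-entry AR file with windowbase 2:

    /* wb == 2, XCHAL_NUM_AREGS == 64:
     * regs->areg[0]  (the current a0) is physical AR8 -> gregset->a[8]
     * regs->areg[60] wraps around: (8 + 60) % 64 == 4 -> gregset->a[4]
     */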
+
+static int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ xtensa_gregset_t *gregset = uregs;
+ const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+ unsigned long ps;
+ unsigned long wb, ws;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+ return -EIO;
+
+ __get_user(regs->pc, &gregset->pc);
+ __get_user(ps, &gregset->ps);
+ __get_user(regs->lbeg, &gregset->lbeg);
+ __get_user(regs->lend, &gregset->lend);
+ __get_user(regs->lcount, &gregset->lcount);
+ __get_user(ws, &gregset->windowstart);
+ __get_user(wb, &gregset->windowbase);
+ __get_user(regs->threadptr, &gregset->threadptr);
+
+ regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+ if (wb >= XCHAL_NUM_AREGS / 4)
+ return -EFAULT;
+
+ if (wb != regs->windowbase || ws != regs->windowstart) {
+ unsigned long rotws, wmask;
+
+ rotws = (((ws | (ws << WSBITS)) >> wb) &
+ ((1 << WSBITS) - 1)) & ~1;
+ wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+ (rotws & 0xF) | 1;
+ regs->windowbase = wb;
+ regs->windowstart = ws;
+ regs->wmask = wmask;
+ }
+
+ if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+ gregset->a, wb * 16))
+ return -EFAULT;
+
+ if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+ (WSBITS - wb) * 16))
+ return -EFAULT;
+
+ return 0;
+}
+
+
+#if XTENSA_HAVE_COPROCESSORS
+#define CP_OFFSETS(cp) \
+ { \
+ .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
+ .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
+ .sz = sizeof(xtregs_ ## cp ## _t), \
+ }
+
+static const struct {
+ size_t elf_xtregs_offset;
+ size_t ti_offset;
+ size_t sz;
+} cp_offsets[] = {
+ CP_OFFSETS(cp0),
+ CP_OFFSETS(cp1),
+ CP_OFFSETS(cp2),
+ CP_OFFSETS(cp3),
+ CP_OFFSETS(cp4),
+ CP_OFFSETS(cp5),
+ CP_OFFSETS(cp6),
+ CP_OFFSETS(cp7),
+};
+#endif
+
+static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ struct thread_info *ti = task_thread_info(child);
+ elf_xtregs_t __user *xtregs = uregs;
+ int ret = 0;
+ int i __maybe_unused;
+
+ if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+ return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessor registers to memory. */
+ coprocessor_flush_all(ti);
+
+ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+ ret |= __copy_to_user((char __user *)xtregs +
+ cp_offsets[i].elf_xtregs_offset,
+ (const char *)ti +
+ cp_offsets[i].ti_offset,
+ cp_offsets[i].sz);
+#endif
+ ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
+ sizeof(xtregs->user));
+
+ return ret ? -EFAULT : 0;
+}
+
+static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+ struct thread_info *ti = task_thread_info(child);
+ struct pt_regs *regs = task_pt_regs(child);
+ elf_xtregs_t *xtregs = uregs;
+ int ret = 0;
+ int i __maybe_unused;
+
+ if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
+ return -EFAULT;
+
+#if XTENSA_HAVE_COPROCESSORS
+ /* Flush all coprocessors before we overwrite them. */
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+
+ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+ ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
+ (const char __user *)xtregs +
+ cp_offsets[i].elf_xtregs_offset,
+ cp_offsets[i].sz);
+#endif
+ ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+ sizeof(xtregs->opt));
+ ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+ sizeof(xtregs->user));
+
+ return ret ? -EFAULT : 0;
+}
+
+static int ptrace_peekusr(struct task_struct *child, long regno,
+ long __user *ret)
+{
+ struct pt_regs *regs;
+ unsigned long tmp;
+
+ regs = task_pt_regs(child);
+ tmp = 0; /* Default return value. */
+
+ switch (regno) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ tmp = regs->areg[regno - REG_AR_BASE];
+ break;
+
+ case REG_A_BASE ... REG_A_BASE + 15:
+ tmp = regs->areg[regno - REG_A_BASE];
+ break;
+
+ case REG_PC:
+ tmp = regs->pc;
+ break;
+
+ case REG_PS:
+ /* Note: PS.EXCM is not set while user task is running;
+ * its being set in regs is for exception handling
+ * convenience.
+ */
+ tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
+ break;
+
+ case REG_WB:
+ break; /* tmp = 0 */
+
+ case REG_WS:
+ {
+ unsigned long wb = regs->windowbase;
+ unsigned long ws = regs->windowstart;
+ tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
+ ((1 << WSBITS) - 1);
+ break;
+ }
+ case REG_LBEG:
+ tmp = regs->lbeg;
+ break;
+
+ case REG_LEND:
+ tmp = regs->lend;
+ break;
+
+ case REG_LCOUNT:
+ tmp = regs->lcount;
+ break;
+
+ case REG_SAR:
+ tmp = regs->sar;
+ break;
+
+ case SYSCALL_NR:
+ tmp = regs->syscall;
+ break;
+
+ default:
+ return -EIO;
+ }
+ return put_user(tmp, ret);
+}
+
+static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+
+ switch (regno) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ regs->areg[regno - REG_AR_BASE] = val;
+ break;
+
+ case REG_A_BASE ... REG_A_BASE + 15:
+ regs->areg[regno - REG_A_BASE] = val;
+ break;
+
+ case REG_PC:
+ regs->pc = val;
+ break;
+
+ case SYSCALL_NR:
+ regs->syscall = val;
+ break;
+
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void ptrace_hbptriggered(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int i;
+ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+
+ if (bp->attr.bp_type & HW_BREAKPOINT_X) {
+ for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
+ if (current->thread.ptrace_bp[i] == bp)
+ break;
+ i <<= 1;
+ } else {
+ for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
+ if (current->thread.ptrace_wp[i] == bp)
+ break;
+ i = (i << 1) | 1;
+ }
+
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
+}
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+ struct perf_event_attr attr;
+
+ ptrace_breakpoint_init(&attr);
+
+ /* Initialise fields to sane defaults. */
+ attr.bp_addr = 0;
+ attr.bp_len = 1;
+ attr.bp_type = type;
+ attr.disabled = 1;
+
+ return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+ tsk);
+}
+
+/*
+ * Address bit 0 chooses the instruction (0) or data (1) break register;
+ * bits 31..1 are the register number.
+ * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
+ * address (0) and control (1).
+ * The instruction breakpoint control word is 0 to clear a breakpoint,
+ * 1 to set one.
+ * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
+ * 'trigger on load', and bits 29..0 are the length. Length 0 clears a
+ * breakpoint. To set a breakpoint, the length must be a power of 2 in the
+ * range 1..64 and the address must be length-aligned.
+ */
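A few illustrative encodings derived from the scheme just described (values hypothetical, using the DBREAKC masks from asm/regs.h referenced by the code below):

    /* instruction break register 0:  addr = (0 << 1) | 0 == 0
     * data break register 1:         addr = (1 << 1) | 1 == 3
     * 4-byte write watchpoint:       data[0] = watch address (4-aligned)
     *                                data[1] = DBREAKC_STOR_MASK | 4
     * clear it again:                data[1] = 0
     */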
+
+static long ptrace_gethbpregs(struct task_struct *child, long addr,
+ long __user *datap)
+{
+ struct perf_event *bp;
+ u32 user_data[2] = {0};
+ bool dbreak = addr & 1;
+ unsigned idx = addr >> 1;
+
+ if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+ (dbreak && idx >= XCHAL_NUM_DBREAK))
+ return -EINVAL;
+
+ if (dbreak)
+ bp = child->thread.ptrace_wp[idx];
+ else
+ bp = child->thread.ptrace_bp[idx];
+
+ if (bp) {
+ user_data[0] = bp->attr.bp_addr;
+ user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
+ if (dbreak) {
+ if (bp->attr.bp_type & HW_BREAKPOINT_R)
+ user_data[1] |= DBREAKC_LOAD_MASK;
+ if (bp->attr.bp_type & HW_BREAKPOINT_W)
+ user_data[1] |= DBREAKC_STOR_MASK;
+ }
+ }
+
+ if (copy_to_user(datap, user_data, sizeof(user_data)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ptrace_sethbpregs(struct task_struct *child, long addr,
+ long __user *datap)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ u32 user_data[2];
+ bool dbreak = addr & 1;
+ unsigned idx = addr >> 1;
+ int bp_type = 0;
+
+ if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
+ (dbreak && idx >= XCHAL_NUM_DBREAK))
+ return -EINVAL;
+
+ if (copy_from_user(user_data, datap, sizeof(user_data)))
+ return -EFAULT;
+
+ if (dbreak) {
+ bp = child->thread.ptrace_wp[idx];
+ if (user_data[1] & DBREAKC_LOAD_MASK)
+ bp_type |= HW_BREAKPOINT_R;
+ if (user_data[1] & DBREAKC_STOR_MASK)
+ bp_type |= HW_BREAKPOINT_W;
+ } else {
+ bp = child->thread.ptrace_bp[idx];
+ bp_type = HW_BREAKPOINT_X;
+ }
+
+ if (!bp) {
+ bp = ptrace_hbp_create(child,
+ bp_type ? bp_type : HW_BREAKPOINT_RW);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+ if (dbreak)
+ child->thread.ptrace_wp[idx] = bp;
+ else
+ child->thread.ptrace_bp[idx] = bp;
+ }
+
+ attr = bp->attr;
+ attr.bp_addr = user_data[0];
+ attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
+ attr.bp_type = bp_type;
+ attr.disabled = !attr.bp_len;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+#endif
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EPERM;
+ void __user *datap = (void __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ case PTRACE_PEEKUSR: /* read register specified by addr. */
+ ret = ptrace_peekusr(child, addr, datap);
+ break;
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+ case PTRACE_POKEUSR: /* write register specified by addr. */
+ ret = ptrace_pokeusr(child, addr, data);
+ break;
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, datap);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, datap);
+ break;
+
+ case PTRACE_GETXTREGS:
+ ret = ptrace_getxregs(child, datap);
+ break;
+
+ case PTRACE_SETXTREGS:
+ ret = ptrace_setxregs(child, datap);
+ break;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ case PTRACE_GETHBPREGS:
+ ret = ptrace_gethbpregs(child, addr, datap);
+ break;
+
+ case PTRACE_SETHBPREGS:
+ ret = ptrace_sethbpregs(child, addr, datap);
+ break;
+#endif
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+unsigned long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ return -1;
+
+ return regs->areg[2];
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644
index 000000000..07e56e3a9
--- /dev/null
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -0,0 +1,128 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap that records the PC of the S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
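For reference, the C-level effect being exercised here (a non-atomic sketch; the real S32C1I performs the compare and conditional store as a single atomic operation against the SCOMPARE1 special register set just above):

    /* What "wsr cmp, scompare1; s32c1i set, v, 0" computes: */
    static int s32c1i_sketch(int *v, int cmp, int set)
    {
            int old = *v;          /* atomic in hardware */
            if (old == cmp)
                    *v = set;
            return old;            /* the old value replaces `set` in
                                    * the destination register */
    }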
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (SoC bring-up assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ * we can get rid of this panic for single core (not SMP).
+ */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display a reminder for early engineering test or demo chips / FPGA
+ * bitstreams.
+ */
+static int __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 000000000..901990b82
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,740 @@
+/*
+ * arch/xtensa/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Kevin Chea
+ * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/screen_info.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+# include <linux/console.h>
+#endif
+
+#ifdef CONFIG_PROC_FS
+# include <linux/seq_file.h>
+#endif
+
+#include <asm/bootparam.h>
+#include <asm/kasan.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/timex.h>
+#include <asm/platform.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/param.h>
+#include <asm/smp.h>
+#include <asm/sysmem.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 24,
+ .orig_video_cols = 80,
+ .orig_video_lines = 24,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16,
+};
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
+int initrd_is_mapped = 0;
+extern int initrd_below_start_ok;
+#endif
+
+#ifdef CONFIG_USE_OF
+void *dtb_start = __dtb_start;
+#endif
+
+extern unsigned long loops_per_jiffy;
+
+/* Command line specified as configuration option. */
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+#ifdef CONFIG_PARSE_BOOTPARAM
+/*
+ * Boot parameter parsing.
+ *
+ * The Xtensa port uses a list of variable-sized tags to pass data to
+ * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
+ * to be recognised. The list is terminated with a zero-sized
+ * BP_TAG_LAST tag.
+ */
+
+typedef struct tagtable {
+ u32 tag;
+ int (*parse)(const bp_tag_t*);
+} tagtable_t;
+
+#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
+ __attribute__((used, section(".taglist"))) = { tag, fn }
+
+/* parse current tag */
+
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+ return -1;
+
+ return memblock_add(mi->start, mi->end - mi->start);
+}
+
+__tagtable(BP_TAG_MEMORY, parse_tag_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init parse_tag_initrd(const bp_tag_t* tag)
+{
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
+
+ return 0;
+}
+
+__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+#ifdef CONFIG_USE_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+ dtb_start = __va(tag->data[0]);
+ return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+#endif /* CONFIG_USE_OF */
+
+static int __init parse_tag_cmdline(const bp_tag_t* tag)
+{
+ strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+ return 0;
+}
+
+__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
+
+static int __init parse_bootparam(const bp_tag_t* tag)
+{
+ extern tagtable_t __tagtable_begin, __tagtable_end;
+ tagtable_t *t;
+
+ /* Boot parameters must start with a BP_TAG_FIRST tag. */
+
+ if (tag->id != BP_TAG_FIRST) {
+ pr_warn("Invalid boot parameters!\n");
+ return 0;
+ }
+
+ tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
+
+ /* Parse all tags. */
+
+ while (tag != NULL && tag->id != BP_TAG_LAST) {
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+ if (tag->id == t->tag) {
+ t->parse(tag);
+ break;
+ }
+ }
+ if (t == &__tagtable_end)
+ pr_warn("Ignoring tag 0x%08x\n", tag->id);
+ tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
+ }
+
+ return 0;
+}
+#else
+static int __init parse_bootparam(const bp_tag_t *tag)
+{
+ pr_info("Ignoring boot parameters at %p\n", tag);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_USE_OF
+
+#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const __be32 *ranges;
+ int len;
+
+ if (depth > 1)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
+
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
+
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ init_kio();
+
+ return 1;
+}
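The mask applied to xtensa_kio_paddr is a plain power-of-two round-down; e.g. a hypothetical ranges translation of 0xf0100000 yields:

    unsigned long kio = 0xf0100000;
    kio &= 0xf0000000;    /* -> 0xf0000000, the enclosing 256MB block */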
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
+#endif
+
+void __init early_init_devtree(void *params)
+{
+ early_init_dt_scan(params);
+ of_scan_flat_dt(xtensa_dt_io_area, NULL);
+
+ if (!command_line[0])
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+#endif /* CONFIG_USE_OF */
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
+ /* Initialize MMU. */
+
+ init_mmu();
+
+ /* Initialize initial KASAN shadow map */
+
+ kasan_early_init();
+
+ /* Parse boot parameters */
+
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+#ifdef CONFIG_USE_OF
+ early_init_devtree(dtb_start);
+#endif
+
+#ifdef CONFIG_CMDLINE_BOOL
+ if (!command_line[0])
+ strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
+ /* Early hook for platforms */
+
+ platform_init(bp_start);
+}
+
+/*
+ * Initialize system. Setup memory and reserve regions.
+ */
+
+extern char _end[];
+extern char _stext[];
+extern char _WindowVectors_text_start;
+extern char _WindowVectors_text_end;
+extern char _DebugInterruptVector_text_start;
+extern char _DebugInterruptVector_text_end;
+extern char _KernelExceptionVector_text_start;
+extern char _KernelExceptionVector_text_end;
+extern char _UserExceptionVector_text_start;
+extern char _UserExceptionVector_text_end;
+extern char _DoubleExceptionVector_text_start;
+extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
+#ifdef CONFIG_SMP
+extern char _SecondaryResetVector_text_start;
+extern char _SecondaryResetVector_text_end;
+#endif
+
+static inline int __init_memblock mem_reserve(unsigned long start,
+ unsigned long end)
+{
+ return memblock_reserve(start, end - start);
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ pr_info("config ID: %08x:%08x\n",
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
+ if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
+ get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
+ pr_info("built for config ID: %08x:%08x\n",
+ XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
+
+ *cmdline_p = command_line;
+ platform_setup(cmdline_p);
+ strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+
+ /* Reserve some memory regions */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start < initrd_end) {
+ initrd_is_mapped = mem_reserve(__pa(initrd_start),
+ __pa(initrd_end)) == 0;
+ initrd_below_start_ok = 1;
+ } else {
+ initrd_start = 0;
+ }
+#endif
+
+ mem_reserve(__pa(_stext), __pa(_end));
+
+#ifdef CONFIG_VECTORS_OFFSET
+ mem_reserve(__pa(&_WindowVectors_text_start),
+ __pa(&_WindowVectors_text_end));
+
+ mem_reserve(__pa(&_DebugInterruptVector_text_start),
+ __pa(&_DebugInterruptVector_text_end));
+
+ mem_reserve(__pa(&_KernelExceptionVector_text_start),
+ __pa(&_KernelExceptionVector_text_end));
+
+ mem_reserve(__pa(&_UserExceptionVector_text_start),
+ __pa(&_UserExceptionVector_text_end));
+
+ mem_reserve(__pa(&_DoubleExceptionVector_text_start),
+ __pa(&_DoubleExceptionVector_text_end));
+
+#if XCHAL_EXCM_LEVEL >= 2
+ mem_reserve(__pa(&_Level2InterruptVector_text_start),
+ __pa(&_Level2InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ mem_reserve(__pa(&_Level3InterruptVector_text_start),
+ __pa(&_Level3InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ mem_reserve(__pa(&_Level4InterruptVector_text_start),
+ __pa(&_Level4InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ mem_reserve(__pa(&_Level5InterruptVector_text_start),
+ __pa(&_Level5InterruptVector_text_end));
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ mem_reserve(__pa(&_Level6InterruptVector_text_start),
+ __pa(&_Level6InterruptVector_text_end));
+#endif
+
+#endif /* CONFIG_VECTORS_OFFSET */
+
+#ifdef CONFIG_SMP
+ mem_reserve(__pa(&_SecondaryResetVector_text_start),
+ __pa(&_SecondaryResetVector_text_end));
+#endif
+ parse_early_param();
+ bootmem_init();
+ kasan_init();
+ unflatten_and_copy_device_tree();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ paging_init();
+ zones_init();
+
+#ifdef CONFIG_VT
+# if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+# elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+# endif
+#endif
+
+#ifdef CONFIG_PCI
+ platform_pcibios_init();
+#endif
+}
+
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data, i);
+ cpu->hotpluggable = !!i;
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
+void cpu_reset(void)
+{
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+ * be flushed.
+ * Way 4 is not currently used by Linux.
+ * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
+ * Way 5 shall be flushed and way 6 shall be set to identity mapping
+ * on MMUv3.
+ */
+ local_flush_tlb_all();
+ invalidate_page_directory();
+#if XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 */
+ {
+ unsigned long vaddr = (unsigned long)cpu_reset;
+ unsigned long paddr = __pa(vaddr);
+ unsigned long tmpaddr = vaddr + SZ_512M;
+ unsigned long tmp0, tmp1, tmp2, tmp3;
+
+ /*
+ * Find a place for the temporary mapping. It must not be
+ * in the same 512MB region with vaddr or paddr, otherwise
+ * there may be multihit exception either on entry to the
+ * temporary mapping, or on entry to the identity mapping.
+ * (512MB is the biggest page size supported by the TLB.)
+ */
+ while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
+ tmpaddr += SZ_512M;
+
+ /* Invalidate mapping in the selected temporary area */
+ if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT))
+ invalidate_itlb_entry(itlb_probe(tmpaddr));
+ if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
+ invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
+
+ /*
+ * Map two consecutive pages starting at the physical address
+ * of this function to the temporary mapping area.
+ */
+ write_itlb_entry(__pte((paddr & PAGE_MASK) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ tmpaddr & PAGE_MASK);
+ write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
+ _PAGE_HW_VALID |
+ _PAGE_HW_EXEC |
+ _PAGE_CA_BYPASS),
+ (tmpaddr & PAGE_MASK) + PAGE_SIZE);
+
+ /* Reinitialize TLB */
+ __asm__ __volatile__ ("movi %0, 1f\n\t"
+ "movi %3, 2f\n\t"
+ "add %0, %0, %4\n\t"
+ "add %3, %3, %5\n\t"
+ "jx %0\n"
+ /*
+ * No literal, data or stack access
+ * below this point
+ */
+ "1:\n\t"
+ /* Initialize *tlbcfg */
+ "movi %0, 0\n\t"
+ "wsr %0, itlbcfg\n\t"
+ "wsr %0, dtlbcfg\n\t"
+ /* Invalidate TLB way 5 */
+ "movi %0, 4\n\t"
+ "movi %1, 5\n"
+ "1:\n\t"
+ "iitlb %1\n\t"
+ "idtlb %1\n\t"
+ "add %1, %1, %6\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ /* Initialize TLB way 6 */
+ "movi %0, 7\n\t"
+ "addi %1, %9, 3\n\t"
+ "addi %2, %9, 6\n"
+ "1:\n\t"
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ "add %1, %1, %7\n\t"
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
+ "isync\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+ /* Complete way 6 initialization */
+ "witlb %1, %2\n\t"
+ "wdtlb %1, %2\n\t"
+ /* Invalidate temporary mapping */
+ "sub %0, %9, %7\n\t"
+ "iitlb %0\n\t"
+ "add %0, %0, %8\n\t"
+ "iitlb %0"
+ : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
+ "=&a"(tmp3)
+ : "a"(tmpaddr - vaddr),
+ "a"(paddr - vaddr),
+ "a"(SZ_128M), "a"(SZ_512M),
+ "a"(PAGE_SIZE),
+ "a"((tmpaddr + SZ_512M) & PAGE_MASK)
+ : "memory");
+ }
+#endif
+#endif
+ __asm__ __volatile__ ("movi a2, 0\n\t"
+ "wsr a2, icountlevel\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+#if XCHAL_HAVE_LOOPS
+ "wsr a2, lcount\n\t"
+#endif
+ "movi a2, 0x1f\n\t"
+ "wsr a2, ps\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2");
+ for (;;)
+ ;
+}
+
+void machine_restart(char * cmd)
+{
+ platform_restart();
+}
+
+void machine_halt(void)
+{
+ platform_halt();
+ while (1);
+}
+
+void machine_power_off(void)
+{
+ platform_power_off();
+ while (1);
+}
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Display some core information through /proc/cpuinfo.
+ */
+
+static int
+c_show(struct seq_file *f, void *slot)
+{
+ /* high-level stuff */
+ seq_printf(f, "CPU count\t: %u\n"
+ "CPU list\t: %*pbl\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "config ID\t: %08x:%08x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ num_online_cpus(),
+ cpumask_pr_args(cpu_online_mask),
+ XCHAL_BUILD_UNIQUE_ID,
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
+ XCHAL_HAVE_BE ? "big" : "little",
+ ccount_freq/1000000,
+ (ccount_freq/10000) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
+ seq_puts(f, "flags\t\t: "
+#if XCHAL_HAVE_NMI
+ "nmi "
+#endif
+#if XCHAL_HAVE_DEBUG
+ "debug "
+# if XCHAL_HAVE_OCD
+ "ocd "
+# endif
+#endif
+#if XCHAL_HAVE_DENSITY
+ "density "
+#endif
+#if XCHAL_HAVE_BOOLEANS
+ "boolean "
+#endif
+#if XCHAL_HAVE_LOOPS
+ "loop "
+#endif
+#if XCHAL_HAVE_NSA
+ "nsa "
+#endif
+#if XCHAL_HAVE_MINMAX
+ "minmax "
+#endif
+#if XCHAL_HAVE_SEXT
+ "sext "
+#endif
+#if XCHAL_HAVE_CLAMPS
+ "clamps "
+#endif
+#if XCHAL_HAVE_MAC16
+ "mac16 "
+#endif
+#if XCHAL_HAVE_MUL16
+ "mul16 "
+#endif
+#if XCHAL_HAVE_MUL32
+ "mul32 "
+#endif
+#if XCHAL_HAVE_MUL32_HIGH
+ "mul32h "
+#endif
+#if XCHAL_HAVE_FP
+ "fpu "
+#endif
+#if XCHAL_HAVE_S32C1I
+ "s32c1i "
+#endif
+ "\n");
+
+ /* Registers. */
+ seq_printf(f,"physical aregs\t: %d\n"
+ "misc regs\t: %d\n"
+ "ibreak\t\t: %d\n"
+ "dbreak\t\t: %d\n",
+ XCHAL_NUM_AREGS,
+ XCHAL_NUM_MISC_REGS,
+ XCHAL_NUM_IBREAK,
+ XCHAL_NUM_DBREAK);
+
+ /* Interrupt. */
+ seq_printf(f,"num ints\t: %d\n"
+ "ext ints\t: %d\n"
+ "int levels\t: %d\n"
+ "timers\t\t: %d\n"
+ "debug level\t: %d\n",
+ XCHAL_NUM_INTERRUPTS,
+ XCHAL_NUM_EXTINTERRUPTS,
+ XCHAL_NUM_INTLEVELS,
+ XCHAL_NUM_TIMERS,
+ XCHAL_DEBUGLEVEL);
+
+ /* Cache */
+ seq_printf(f,"icache line size: %d\n"
+ "icache ways\t: %d\n"
+ "icache size\t: %d\n"
+ "icache flags\t: "
+#if XCHAL_ICACHE_LINE_LOCKABLE
+ "lock "
+#endif
+ "\n"
+ "dcache line size: %d\n"
+ "dcache ways\t: %d\n"
+ "dcache size\t: %d\n"
+ "dcache flags\t: "
+#if XCHAL_DCACHE_IS_WRITEBACK
+ "writeback "
+#endif
+#if XCHAL_DCACHE_LINE_LOCKABLE
+ "lock "
+#endif
+ "\n",
+ XCHAL_ICACHE_LINESIZE,
+ XCHAL_ICACHE_WAYS,
+ XCHAL_ICACHE_SIZE,
+ XCHAL_DCACHE_LINESIZE,
+ XCHAL_DCACHE_WAYS,
+ XCHAL_DCACHE_SIZE);
+
+ return 0;
+}
+
+/*
+ * We show only CPU #0 info.
+ */
+static void *
+c_start(struct seq_file *f, loff_t *pos)
+{
+ return (*pos == 0) ? (void *)1 : NULL;
+}
+
+static void *
+c_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(f, pos);
+}
+
+static void
+c_stop(struct seq_file *f, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op =
+{
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 000000000..72039bd61
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,496 @@
+/*
+ * arch/xtensa/kernel/signal.c
+ *
+ * Signal handling.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005, 2006 Tensilica Inc.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/tracehook.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/ucontext.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/coprocessor.h>
+#include <asm/unistd.h>
+
+extern struct task_struct *coproc_owners[];
+
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ struct {
+ xtregs_opt_t opt;
+ xtregs_user_t user;
+#if XTENSA_HAVE_COPROCESSORS
+ xtregs_coprocessor_t cp;
+#endif
+ } xtregs;
+ unsigned char retcode[6];
+ unsigned int window[4];
+};
+
+/*
+ * Flush register windows stored in pt_regs to stack.
+ * Returns 1 for errors.
+ */
+
+int
+flush_window_regs_user(struct pt_regs *regs)
+{
+ const unsigned long ws = regs->windowstart;
+ const unsigned long wb = regs->windowbase;
+ unsigned long sp = 0;
+ unsigned long wm;
+ int err = 1;
+ int base;
+
+ /* Return if no other frames. */
+
+ if (regs->wmask == 1)
+ return 0;
+
+ /* Rotate windowmask and skip empty frames. */
+
+ wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
+ base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
+
+ /* For call8 or call12 frames, we need the previous stack pointer. */
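+	/* It was spilled under that frame's a1: the word at (a1 - 12). */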
+
+ if ((regs->wmask & 2) == 0)
+ if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
+ goto errout;
+
+ /* Spill frames to stack. */
+
+ while (base < XCHAL_NUM_AREGS / 4) {
+
+ int m = (wm >> base);
+ int inc = 0;
+
+		/* Save registers a4..a7 (call8) or a4..a11 (call12) */
+
+ if (m & 2) { /* call4 */
+ inc = 1;
+
+ } else if (m & 4) { /* call8 */
+ if (copy_to_user(&SPILL_SLOT_CALL8(sp, 4),
+ &regs->areg[(base + 1) * 4], 16))
+ goto errout;
+ inc = 2;
+
+ } else if (m & 8) { /* call12 */
+ if (copy_to_user(&SPILL_SLOT_CALL12(sp, 4),
+ &regs->areg[(base + 1) * 4], 32))
+ goto errout;
+ inc = 3;
+ }
+
+ /* Save current frame a0..a3 under next SP */
+
+ sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
+ if (copy_to_user(&SPILL_SLOT(sp, 0), &regs->areg[base * 4], 16))
+ goto errout;
+
+ /* Get current stack pointer for next loop iteration. */
+
+ sp = regs->areg[base * 4 + 1];
+ base += inc;
+ }
+
+ regs->wmask = 1;
+ regs->windowstart = 1 << wb;
+
+ return 0;
+
+errout:
+ return err;
+}
+
+/*
+ * Note: We don't copy the double exception 'regs'; the double exception
+ * must be finished first before we return to the signal handler. That
+ * handler might cause another double exception, but we should be fine,
+ * as the situation is the same as if we had returned to the signal
+ * handler and got an interrupt immediately...
+ */
+
+static int
+setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(ps);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+#undef COPY
+
+ err |= flush_window_regs_user(regs);
+	err |= __copy_to_user(sc->sc_a, regs->areg, 16 * 4);
+ err |= __put_user(0, &sc->sc_xtregs);
+
+ if (err)
+ return err;
+
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_flush_all(ti);
+ coprocessor_release_all(ti);
+ err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
+ sizeof (frame->xtregs.cp));
+#endif
+ err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
+ sizeof (xtregs_opt_t));
+ err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
+ sizeof (xtregs_user_t));
+
+ err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
+
+ return err;
+}
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ struct thread_info *ti = current_thread_info();
+ unsigned int err = 0;
+ unsigned long ps;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+ COPY(pc);
+ COPY(lbeg);
+ COPY(lend);
+ COPY(lcount);
+ COPY(sar);
+#undef COPY
+
+	/* All registers were flushed to stack. Start with a pristine frame. */
+
+ regs->wmask = 1;
+ regs->windowbase = 0;
+ regs->windowstart = 1;
+
+ regs->syscall = -1; /* disable syscall checks */
+
+ /* For PS, restore only PS.CALLINC.
+ * Assume that all other bits are either the same as for the signal
+ * handler, or the user mode value doesn't matter (e.g. PS.OWB).
+ */
+ err |= __get_user(ps, &sc->sc_ps);
+ regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
+
+ /* Additional corruption checks */
+
+ if ((regs->lcount > 0)
+	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)))
+ err = 1;
+
+ err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
+
+ if (err)
+ return err;
+
+ /* The signal handler may have used coprocessors in which
+ * case they are still enabled. We disable them to force a
+ * reloading of the original task's CP state by the lazy
+ * context-switching mechanisms of CP exception handling.
+ * Also, we essentially discard any coprocessor state that the
+ * signal handler created. */
+
+#if XTENSA_HAVE_COPROCESSORS
+ coprocessor_release_all(ti);
+ err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
+ sizeof (frame->xtregs.cp));
+#endif
+ err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
+ sizeof (xtregs_user_t));
+ err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
+ sizeof (xtregs_opt_t));
+
+ return err;
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
+ long a4, long a5, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ int ret;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ if (regs->depc > 64)
+ panic("rt_sigreturn in double exception!\n");
+
+ frame = (struct rt_sigframe __user *) regs->areg[1];
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, frame))
+ goto badframe;
+
+ ret = regs->areg[2];
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return ret;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+gen_return_code(unsigned char *codemem)
+{
+ int err = 0;
+
+ /*
+ * The 12-bit immediate is really split up within the 24-bit MOVI
+	 * instruction. As long as __NR_rt_sigreturn fits within 8 bits,
+	 * the following code works fine. See the Xtensa ISA for
+ * details.
+ */
+
+#if __NR_rt_sigreturn > 255
+# error Generating the MOVI instruction below breaks!
+#endif
+
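+	/*
+	 * Byte-layout sketch, inferred from the stores below rather than
+	 * quoted from the ISA manual: the little-endian MOVI a2, imm12 is
+	 * emitted as { 0x22, 0xa0 | imm[11:8], imm[7:0] }, so while the
+	 * immediate fits in 8 bits the middle byte stays exactly 0xa0; the
+	 * big-endian form mirrors the nibbles.
+	 */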
+#ifdef __XTENSA_EB__ /* Big Endian version */
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0x0a, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x05, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+
+#elif defined __XTENSA_EL__ /* Little Endian version */
+ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
+ err |= __put_user(0x22, &codemem[0]);
+ err |= __put_user(0xa0, &codemem[1]);
+ err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
+ /* Generate instruction: SYSCALL */
+ err |= __put_user(0x00, &codemem[3]);
+ err |= __put_user(0x50, &codemem[4]);
+ err |= __put_user(0x00, &codemem[5]);
+#else
+# error Must use compiler for Xtensa processors.
+#endif
+
+ /* Flush generated code out of the data cache */
+
+ if (err == 0) {
+ __invalidate_icache_range((unsigned long)codemem, 6UL);
+ __flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
+ }
+
+ return err;
+}
+
+
+static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0, sig = ksig->sig;
+ unsigned long sp, ra, tp;
+
+ sp = regs->areg[1];
+
+ if ((ksig->ka.sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ frame = (void *)((sp - sizeof(*frame)) & -16ul);
+
+ if (regs->depc > 64)
+ panic ("Double exception sys_sigreturn\n");
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+ return -EFAULT;
+ }
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+ }
+
+ /* Create the user context. */
+
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (ksig->ka.sa.sa_flags & SA_RESTORER) {
+ ra = (unsigned long)ksig->ka.sa.sa_restorer;
+ } else {
+
+ /* Create sys_rt_sigreturn syscall in stack frame */
+
+ err |= gen_return_code(frame->retcode);
+
+ if (err) {
+ return -EFAULT;
+ }
+ ra = (unsigned long) frame->retcode;
+ }
+
+ /*
+ * Create signal handler execution context.
+ * Return context not modified until this point.
+ */
+
+ /* Set up registers for signal handler; preserve the threadptr */
+ tp = regs->threadptr;
+ start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
+ (unsigned long) frame);
+
+ /* Set up a stack frame for a call4
+ * Note: PS.CALLINC is set to one by start_thread
+ */
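+	/* The two top bits of a return address encode the caller's window
+	 * increment; 0x40000000 marks 'ra' as a call4-style return address.
+	 */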
+ regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
+ regs->areg[6] = (unsigned long) sig;
+ regs->areg[7] = (unsigned long) &frame->info;
+ regs->areg[8] = (unsigned long) &frame->uc;
+ regs->threadptr = tp;
+
+ pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+ current->comm, current->pid, sig, frame, regs->pc);
+
+ return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ task_pt_regs(current)->icountlevel = 0;
+
+ if (get_signal(&ksig)) {
+ int ret;
+
+ /* Are we from a system call? */
+
+ if ((signed)regs->syscall >= 0) {
+
+ /* If so, check system call restarting.. */
+
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ksig.ka.sa.sa_flags & SA_RESTART)) {
+ regs->areg[2] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
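+				/* Re-arm the syscall and rewind the PC over
+				 * the 3-byte SYSCALL instruction.
+				 */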
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+
+ default:
+ /* nothing to do */
+ if (regs->areg[2] != 0)
+ break;
+ }
+ }
+
+ /* Whee! Actually deliver the signal. */
+ /* Set up the stack frame */
+ ret = setup_frame(&ksig, sigmask_to_save(), regs);
+ signal_setup_done(ret, &ksig, 0);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ task_pt_regs(current)->icountlevel = 1;
+
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if ((signed) regs->syscall >= 0) {
+ /* Restart the system call - no handlers present */
+ switch (regs->areg[2]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->areg[2] = regs->syscall;
+ regs->pc -= 3;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->areg[2] = __NR_restart_syscall;
+ regs->pc -= 3;
+ break;
+ }
+ }
+
+ /* If there's no signal to deliver, we just restore the saved mask. */
+ restore_saved_sigmask();
+
+ if (test_thread_flag(TIF_SINGLESTEP))
+ task_pt_regs(current)->icountlevel = 1;
+ return;
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING))
+ do_signal(regs);
+
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 000000000..be1f280c3
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,624 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+# error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+
+/* IPI (Inter-Processor Interrupt) */
+
+#define IPI_IRQ 0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+static struct irqaction ipi_irqaction = {
+ .handler = ipi_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "ipi",
+};
+
+void ipi_init(void)
+{
+ unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+ setup_irq(irq, &ipi_irqaction);
+}
+
+static inline unsigned int get_core_count(void)
+{
+ /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+ unsigned int syscfgid = get_er(SYSCFGID);
+ return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+	/* Bits 0..13 of SYSCFGID contain the core id */
+ unsigned int core_id = get_er(SYSCFGID);
+ return core_id & 0x3fff;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned i;
+
+ for_each_possible_cpu(i)
+ set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+ unsigned i;
+ unsigned int ncpus = get_core_count();
+ unsigned int core_id = get_core_id();
+
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+ }
+
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+ BUG_ON(cpu != 0);
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ init_mmu();
+
+#ifdef CONFIG_DEBUG_KERNEL
+ if (boot_secondary_processors == 0) {
+ pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+ for (;;)
+ __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+ }
+
+ pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+#endif
+ /* Init EXCSAVE1 */
+
+ secondary_trap_init();
+
+ /* All kernel threads share the same mm context. */
+
+ mmget(mm);
+ mmgrab(mm);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ enter_lazy_tlb(mm, current);
+
+ preempt_disable();
+ trace_hardirqs_off();
+
+ calibrate_delay();
+
+ notify_cpu_starting(cpu);
+
+ secondary_init_irq();
+ local_timer_setup(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ complete(&cpu_running);
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+static void mx_cpu_start(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask | (1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned long ccount;
+ int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+#endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
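+	/*
+	 * Handshake with the secondary's startup code: publish a non-zero
+	 * ccount and wait for the other core to consume (clear) it. If it
+	 * never does, stall the core again and report failure.
+	 */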
+ for (i = 0; i < 2; ++i) {
+ do
+ ccount = get_ccount();
+ while (!ccount);
+
+ WRITE_ONCE(cpu_start_ccount, ccount);
+
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
+ mb();
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
+
+ if (ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret = 0;
+
+ if (cpu_asid_cache(cpu) == 0)
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+ start_info.stack = (unsigned long)task_pt_regs(idle);
+ wmb();
+
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
+ init_completion(&cpu_running);
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ if (ret)
+ pr_err("CPU %u failed to boot\n", cpu);
+
+ return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ invalidate_page_directory();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+ smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * Called on the thread that is asking for a CPU to be shut down;
+ * waits until shutdown has completed, or times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+ }
+ pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ idle_task_exit();
+ local_irq_disable();
+ __asm__ __volatile__(
+ " movi a2, cpu_restart\n"
+ " jx a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE = 0,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_MAX
+};
+
+static const struct {
+ const char *short_text;
+ const char *long_text;
+} ipi_text[] = {
+ { .short_text = "RES", .long_text = "Rescheduling interrupts" },
+ { .short_text = "CAL", .long_text = "Function call interrupts" },
+ { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+ unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void send_ipi_message(const struct cpumask *callmask,
+ enum ipi_msg_type msg_id)
+{
+ int index;
+ unsigned long mask = 0;
+
+ for_each_cpu(index, callmask)
+ if (index != smp_processor_id())
+ mask |= 1 << index;
+
+ set_er(mask, MIPISET(msg_id));
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ set_cpu_online(cpu, false);
+ machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned int msg;
+ unsigned i;
+
+ msg = get_er(MIPICAUSE(cpu));
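+	/* Count each pending IPI and write its bit back to MIPICAUSE
+	 * to ack it.
+	 */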
+ for (i = 0; i < IPI_MAX; i++)
+ if (msg & (1 << i)) {
+ set_er(1 << i, MIPICAUSE(cpu));
+ ++ipi->ipi_count[i];
+ }
+
+ if (msg & (1 << IPI_RESCHEDULE))
+ scheduler_ipi();
+ if (msg & (1 << IPI_CALL_FUNC))
+ generic_smp_call_function_interrupt();
+ if (msg & (1 << IPI_CPU_STOP))
+ ipi_cpu_stop(cpu);
+
+ return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+ unsigned i;
+
+ for (i = 0; i < IPI_MAX; ++i) {
+ seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu",
+ per_cpu(ipi_data, cpu).ipi_count[i]);
+ seq_printf(p, " %s\n", ipi_text[i].long_text);
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ pr_debug("setup_profiling_timer %d\n", multiplier);
+ return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+ local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = addr,
+ };
+ on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+ local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = address,
+ .addr2 = pfn,
+ };
+ on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 000000000..a94da7dd3
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,270 @@
+/*
+ * Kernel and userspace stack tracing.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long windowstart = regs->windowstart;
+ unsigned long windowbase = regs->windowbase;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+ unsigned long pc = regs->pc;
+ struct stackframe frame;
+ int index;
+
+ if (!depth--)
+ return;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+
+ /* Two steps:
+ *
+ * 1. Look through the register window for the
+ * previous PCs in the call trace.
+ *
+ * 2. Look on the stack.
+ */
+
+ /* Step 1. */
+ /* Rotate WINDOWSTART to move the bit corresponding to
+ * the current window to the bit #0.
+ */
+ windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+ /* Look for bits that are set, they correspond to
+ * valid windows.
+ */
+ for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+ if (windowstart & (1 << index)) {
+ /* Get the PC from a0 and a1. */
+ pc = MAKE_PC_FROM_RA(a0, pc);
+ /* Read a0 and a1 from the
+ * corresponding position in AREGs.
+ */
+ a0 = regs->areg[index * 4];
+ a1 = regs->areg[index * 4 + 1];
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+ }
+
+ /* Step 2. */
+ /* We are done with the register window, we need to
+ * look through the stack.
+ */
+ if (!depth)
+ return;
+
+ /* Start from the a1 register. */
+ /* a1 = regs->areg[1]; */
+ while (a0 != 0 && depth--) {
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Check if the region is OK to access. */
+ if (!access_ok(VERIFY_READ, &SPILL_SLOT(a1, 0), 8))
+ return;
+ /* Copy a1, a0 from user space stack frame. */
+ if (__get_user(a0, &SPILL_SLOT(a1, 0)) ||
+ __get_user(a1, &SPILL_SLOT(a1, 1)))
+ return;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+ return;
+ }
+}
+EXPORT_SYMBOL(xtensa_backtrace_user);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ?
+ regs->depc : regs->pc;
+ unsigned long sp_start, sp_end;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+
+ sp_start = a1 & ~(THREAD_SIZE - 1);
+ sp_end = sp_start + THREAD_SIZE;
+
+ /* Spill the register window to the stack first. */
+ spill_registers();
+
+ /* Read the stack frames one by one and create the PC
+ * from the a0 and a1 registers saved there.
+ */
+ while (a1 > sp_start && a1 < sp_end && depth--) {
+ struct stackframe frame;
+
+ frame.pc = pc;
+ frame.sp = a1;
+
+ if (kernel_text_address(pc) && kfn(&frame, data))
+ return;
+
+ if (pc == (unsigned long)&common_exception_return) {
+ regs = (struct pt_regs *)a1;
+ if (user_mode(regs)) {
+ if (ufn == NULL)
+ return;
+ xtensa_backtrace_user(regs, depth, ufn, data);
+ return;
+ }
+ a0 = regs->areg[0];
+ a1 = regs->areg[1];
+ continue;
+ }
+
+ sp_start = a1;
+
+ pc = MAKE_PC_FROM_RA(a0, pc);
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
+ }
+}
+EXPORT_SYMBOL(xtensa_backtrace_kernel);
+
+#endif
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long a0, a1;
+ unsigned long sp_end;
+
+ a1 = (unsigned long)sp;
+ sp_end = ALIGN(a1, THREAD_SIZE);
+
+ spill_registers();
+
+ while (a1 < sp_end) {
+ struct stackframe frame;
+
+ sp = (unsigned long *)a1;
+
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
+
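+		/* Frames must move to strictly higher addresses; stop otherwise. */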
+ if (a1 <= (unsigned long)sp)
+ break;
+
+ frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+ return;
+ }
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+ struct stack_trace_data *trace_data = data;
+ struct stack_trace *trace = trace_data->trace;
+
+ if (trace_data->skip) {
+ --trace_data->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+
+ trace->entries[trace->nr_entries++] = frame->pc;
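+	/* A non-zero return value stops the frame walk once the trace is full. */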
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ struct stack_trace_data trace_data = {
+ .trace = trace,
+ .skip = trace->skip,
+ };
+ walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+ unsigned long addr;
+ unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+ struct return_addr_data *r = data;
+
+ if (r->skip) {
+ --r->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+ r->addr = frame->pc;
+ return 1;
+}
+
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
+unsigned long return_address(unsigned level)
+{
+ struct return_addr_data r = {
+ .skip = level,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
new file mode 100644
index 000000000..8201748da
--- /dev/null
+++ b/arch/xtensa/kernel/syscall.c
@@ -0,0 +1,98 @@
+/*
+ * arch/xtensa/kernel/syscall.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Chris Zankel <chris@zankel.net>
+ * Kevin Chea
+ *
+ */
+#include <linux/uaccess.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/shm.h>
+
+typedef void (*syscall_t)(void);
+
+syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
+ [0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
+
+#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
+#include <uapi/asm/unistd.h>
+};
+
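+/*
+ * Round 'addr' up to an SHMLBA boundary, then add the cache-colour
+ * offset implied by 'pgoff', so that shared mappings of the same pages
+ * always land on the same colour of a virtually-indexed cache.
+ */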
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
+{
+ unsigned long ret;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
+ if (err)
+ return err;
+ return (long)ret;
+}
+
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
+{
+ return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
+#ifdef CONFIG_MMU
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vmm;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vmm || addr + len <= vm_start_gap(vmm))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
+#endif
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 000000000..378186b5e
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,218 @@
+/*
+ * arch/xtensa/kernel/time.c
+ *
+ * Timer and clock support.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/sched_clock.h>
+
+#include <asm/timex.h>
+#include <asm/platform.h>
+
+unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
+
+static u64 ccount_read(struct clocksource *cs)
+{
+ return (u64)get_ccount();
+}
+
+static u64 notrace ccount_sched_clock_read(void)
+{
+ return get_ccount();
+}
+
+static struct clocksource ccount_clocksource = {
+ .name = "ccount",
+ .rating = 200,
+ .read = ccount_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev);
+struct ccount_timer {
+ struct clock_event_device evt;
+ int irq_enabled;
+ char name[24];
+};
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
+
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ unsigned long flags, next;
+ int ret = 0;
+
+ local_irq_save(flags);
+ next = get_ccount() + delta;
+ set_linux_timer(next);
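+	/*
+	 * If the unsigned distance to 'next' now exceeds the requested
+	 * delta, the ccount has already raced past 'next' and the event
+	 * was missed.
+	 */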
+ if (next - get_ccount() > delta)
+ ret = -ETIME;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+static int ccount_timer_shutdown(struct clock_event_device *evt)
+{
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ if (timer->irq_enabled) {
+ disable_irq_nosync(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ return 0;
+}
+
+static int ccount_timer_set_oneshot(struct clock_event_device *evt)
+{
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ if (!timer->irq_enabled) {
+ enable_irq(evt->irq);
+ timer->irq_enabled = 1;
+ }
+ return 0;
+}
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id);
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = IRQF_TIMER,
+ .name = "timer",
+};
+
+void local_timer_setup(unsigned cpu)
+{
+ struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+ struct clock_event_device *clockevent = &timer->evt;
+
+ timer->irq_enabled = 1;
+ clockevent->name = timer->name;
+ snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+ clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->rating = 300;
+ clockevent->set_next_event = ccount_timer_set_next_event;
+ clockevent->set_state_shutdown = ccount_timer_shutdown;
+ clockevent->set_state_oneshot = ccount_timer_set_oneshot;
+ clockevent->tick_resume = ccount_timer_set_oneshot;
+ clockevent->cpumask = cpumask_of(cpu);
+ clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ if (WARN(!clockevent->irq, "error: can't map timer irq"))
+ return;
+ clockevents_config_and_register(clockevent, ccount_freq,
+ 0xf, 0xffffffff);
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+#ifdef CONFIG_OF
+static void __init calibrate_ccount(void)
+{
+ struct device_node *cpu;
+ struct clk *clk;
+
+ cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+ if (cpu) {
+ clk = of_clk_get(cpu, 0);
+ if (!IS_ERR(clk)) {
+ ccount_freq = clk_get_rate(clk);
+ return;
+ } else {
+ pr_warn("%s: CPU input clock not found\n",
+ __func__);
+ }
+ } else {
+ pr_warn("%s: CPU node not found in the device tree\n",
+ __func__);
+ }
+
+ platform_calibrate_ccount();
+}
+#else
+static inline void calibrate_ccount(void)
+{
+ platform_calibrate_ccount();
+}
+#endif
+#endif
+
+void __init time_init(void)
+{
+ of_clk_init(NULL);
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+ pr_info("Calibrating CPU frequency ");
+ calibrate_ccount();
+ pr_cont("%d.%02d MHz\n",
+ (int)ccount_freq / 1000000,
+ (int)(ccount_freq / 10000) % 100);
+#else
+	ccount_freq = CONFIG_XTENSA_CPU_CLOCK * 1000000UL;
+#endif
+ WARN(!ccount_freq,
+ "%s: CPU clock frequency is not set up correctly\n",
+ __func__);
+ clocksource_register_hz(&ccount_clocksource, ccount_freq);
+ local_timer_setup(0);
+ setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+ sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+ timer_probe();
+}
+
+/*
+ * The timer interrupt is called HZ times per second.
+ */
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
+
+ set_linux_timer(get_linux_timer());
+ evt->event_handler(evt);
+
+	/* Allow platform to do something useful (e.g. kick a watchdog). */
+ platform_heartbeat();
+
+ return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
+void calibrate_delay(void)
+{
+ loops_per_jiffy = ccount_freq / HZ;
+ pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+ loops_per_jiffy / (1000000 / HZ),
+ (loops_per_jiffy / (10000 / HZ)) % 100);
+}
+#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 000000000..86507fa7c
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,546 @@
+/*
+ * arch/xtensa/kernel/traps.c
+ *
+ * Exception handling.
+ *
+ * Derived from code with the following copyrights:
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Modified for R3000 by Paul M. Antoine, 1995, 1996
+ * Complete output from die() by Ulf Carlsson, 1998
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ *
+ * Essentially rewritten for the Xtensa architecture port.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
+
+#include <asm/stacktrace.h>
+#include <asm/ptrace.h>
+#include <asm/timex.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/traps.h>
+#include <asm/hw_breakpoint.h>
+
+/*
+ * Machine specific interrupt handlers
+ */
+
+extern void kernel_exception(void);
+extern void user_exception(void);
+
+extern void fast_syscall_kernel(void);
+extern void fast_syscall_user(void);
+extern void fast_alloca(void);
+extern void fast_unaligned(void);
+extern void fast_second_level_miss(void);
+extern void fast_store_prohibited(void);
+extern void fast_coprocessor(void);
+
+extern void do_illegal_instruction (struct pt_regs*);
+extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
+extern void do_unaligned_user (struct pt_regs*);
+extern void do_multihit (struct pt_regs*, unsigned long);
+extern void do_page_fault (struct pt_regs*, unsigned long);
+extern void do_debug (struct pt_regs*);
+extern void system_call (struct pt_regs*);
+
+/*
+ * The vector table must be preceded by a save area (which
+ * implies it must be in RAM, unless one places RAM immediately
+ * before a ROM and puts the vector at the start of the ROM (!))
+ */
+
+#define KRNL 0x01
+#define USER 0x02
+
+#define COPROCESSOR(x) \
+{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
+
+typedef struct {
+ int cause;
+ int fast;
+ void* handler;
+} dispatch_init_table_t;
+
+static dispatch_init_table_t __initdata dispatch_init_table[] = {
+
+{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
+{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
+{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
+{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
+/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
+/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
+{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
+{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
+/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
+/* EXCCAUSE_PRIVILEGED unhandled */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+#ifdef CONFIG_XTENSA_UNALIGNED_USER
+{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
+#endif
+{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
+{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
+#endif
+#ifdef CONFIG_MMU
+{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
+/* EXCCAUSE_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
+{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
+{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
+/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
+{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
+#endif /* CONFIG_MMU */
+/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
+#if XTENSA_HAVE_COPROCESSOR(0)
+COPROCESSOR(0),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(1)
+COPROCESSOR(1),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(2)
+COPROCESSOR(2),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(3)
+COPROCESSOR(3),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(4)
+COPROCESSOR(4),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(5)
+COPROCESSOR(5),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(6)
+COPROCESSOR(6),
+#endif
+#if XTENSA_HAVE_COPROCESSOR(7)
+COPROCESSOR(7),
+#endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
+#endif
+{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
+{ -1, -1, 0 }
+
+};
+
+/* The exception table <exc_table> serves two functions:
+ * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
+ * 2. it is a temporary memory buffer for the exception handlers.
+ */
+
+DEFINE_PER_CPU(struct exc_table, exc_table);
+DEFINE_PER_CPU(struct debug_table, debug_table);
+
+void die(const char*, struct pt_regs*, long);
+
+static inline void
+__die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * Unhandled Exceptions. Kill user task or panic if in kernel space.
+ */
+
+void do_unhandled(struct pt_regs *regs, unsigned long exccause)
+{
+ __die_if_kernel("Caught unhandled exception - should not happen",
+ regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process */
+ pr_info_ratelimited("Caught unhandled exception in '%s' "
+ "(pid = %d, pc = %#010lx) - should not happen\n"
+ "\tEXCCAUSE is %ld\n",
+ current->comm, task_pid_nr(current), regs->pc,
+ exccause);
+ force_sig(SIGILL, current);
+}
+
+/*
+ * Multi-hit exception. This is fatal!
+ */
+
+void do_multihit(struct pt_regs *regs, unsigned long exccause)
+{
+ die("Caught multihit exception", regs, SIGKILL);
+}
+
+/*
+ * IRQ handler.
+ */
+
+extern void do_IRQ(int, struct pt_regs *);
+
+#if XTENSA_FAKE_NMI
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+ IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."
+
+static inline void check_valid_nmi(void)
+{
+ unsigned intread = get_sr(interrupt);
+ unsigned intenable = get_sr(intenable);
+
+ BUG_ON(intread & intenable &
+ ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+ XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+ BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+ trace_hardirqs_off();
+
+ old_regs = set_irq_regs(regs);
+ nmi_enter();
+ ++*this_cpu_ptr(&nmi_count);
+ check_valid_nmi();
+ xtensa_pmu_irq_handler(0, NULL);
+ nmi_exit();
+ set_irq_regs(old_regs);
+}
+#endif
+
+void do_interrupt(struct pt_regs *regs)
+{
+ static const unsigned int_level_mask[] = {
+ 0,
+ XCHAL_INTLEVEL1_MASK,
+ XCHAL_INTLEVEL2_MASK,
+ XCHAL_INTLEVEL3_MASK,
+ XCHAL_INTLEVEL4_MASK,
+ XCHAL_INTLEVEL5_MASK,
+ XCHAL_INTLEVEL6_MASK,
+ XCHAL_INTLEVEL7_MASK,
+ };
+ struct pt_regs *old_regs;
+
+ trace_hardirqs_off();
+
+ old_regs = set_irq_regs(regs);
+ irq_enter();
+
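+	/*
+	 * Service pending, enabled interrupts highest level first,
+	 * re-reading INTERRUPT after each pass, until nothing at or
+	 * below LOCKLEVEL remains asserted.
+	 */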
+ for (;;) {
+ unsigned intread = get_sr(interrupt);
+ unsigned intenable = get_sr(intenable);
+ unsigned int_at_level = intread & intenable;
+ unsigned level;
+
+ for (level = LOCKLEVEL; level > 0; --level) {
+ if (int_at_level & int_level_mask[level]) {
+ int_at_level &= int_level_mask[level];
+ break;
+ }
+ }
+
+ if (level == 0)
+ break;
+
+ do_IRQ(__ffs(int_at_level), regs);
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+/*
+ * Illegal instruction. Fatal if in kernel space.
+ */
+
+void
+do_illegal_instruction(struct pt_regs *regs)
+{
+ __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGILL signal to current process. */
+
+ pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+ current->comm, task_pid_nr(current), regs->pc);
+ force_sig(SIGILL, current);
+}
+
+
+/*
+ * Handle unaligned memory accesses from user space. Kill task.
+ *
+ * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
+ * memory accesses from user space.
+ */
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
+void
+do_unaligned_user (struct pt_regs *regs)
+{
+ __die_if_kernel("Unhandled unaligned exception in kernel",
+ regs, SIGKILL);
+
+ current->thread.bad_vaddr = regs->excvaddr;
+ current->thread.error_code = -3;
+ pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
+ "(pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm,
+ task_pid_nr(current), regs->pc);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr, current);
+}
+#endif
+
+/* Handle debug events.
+ * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
+ * preemption disabled to avoid rescheduling and keep mapping of hardware
+ * breakpoint structures to debug registers intact, so that
+ * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
+ */
+void
+do_debug(struct pt_regs *regs)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret = check_hw_breakpoint(regs);
+
+ preempt_enable();
+ if (ret == 0)
+ return;
+#endif
+ __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
+
+ /* If in user mode, send SIGTRAP signal to current process */
+
+ force_sig(SIGTRAP, current);
+}
+
+
+#define set_handler(type, cause, handler) \
+ do { \
+ unsigned int cpu; \
+ \
+ for_each_possible_cpu(cpu) \
+ per_cpu(exc_table, cpu).type[cause] = (handler);\
+ } while (0)
+
+/* Set exception C handler - for temporary use when probing exceptions */
+
+void * __init trap_set_handler(int cause, void *handler)
+{
+ void *previous = per_cpu(exc_table, 0).default_handler[cause];
+
+ set_handler(default_handler, cause, handler);
+ return previous;
+}
+
+
+static void trap_init_excsave(void)
+{
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+}
+
+static void trap_init_debug(void)
+{
+ unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);
+
+ this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
+ __asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
+ :: "a"(debugsave));
+}
+
+/*
+ * Initialize dispatch tables.
+ *
+ * The exception vectors are stored compressed in the __init section in the
+ * dispatch_init_table. This function initializes the following three tables
+ * from that compressed table:
+ * - fast user first dispatch table for user exceptions
+ * - fast kernel first dispatch table for kernel exceptions
+ * - default C-handler called by the default fast handler.
+ *
+ * See vectors.S for more details.
+ */
+
+void __init trap_init(void)
+{
+ int i;
+
+ /* Setup default vectors. */
+
+ for (i = 0; i < EXCCAUSE_N; i++) {
+ set_handler(fast_user_handler, i, user_exception);
+ set_handler(fast_kernel_handler, i, kernel_exception);
+ set_handler(default_handler, i, do_unhandled);
+ }
+
+ /* Setup specific handlers. */
+
+	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
+
+ int fast = dispatch_init_table[i].fast;
+ int cause = dispatch_init_table[i].cause;
+ void *handler = dispatch_init_table[i].handler;
+
+ if (fast == 0)
+ set_handler(default_handler, cause, handler);
+ if (fast && fast & USER)
+ set_handler(fast_user_handler, cause, handler);
+ if (fast && fast & KRNL)
+ set_handler(fast_kernel_handler, cause, handler);
+ }
+
+ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
+ trap_init_excsave();
+ trap_init_debug();
+}
+
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+ trap_init_excsave();
+ trap_init_debug();
+}
+#endif
+
+/*
+ * This function dumps the current valid window frame and other base registers.
+ */
+
+void show_regs(struct pt_regs * regs)
+{
+ int i, wmask;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ wmask = regs->wmask & ~1;
+
+ for (i = 0; i < 16; i++) {
+ if ((i % 8) == 0)
+ pr_info("a%02d:", i);
+ pr_cont(" %08lx", regs->areg[i]);
+ }
+ pr_cont("\n");
+ pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+ regs->pc, regs->ps, regs->depc, regs->excvaddr);
+ pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+ regs->lbeg, regs->lend, regs->lcount, regs->sar);
+ if (user_mode(regs))
+ pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+ regs->windowbase, regs->windowstart, regs->wmask,
+ regs->syscall);
+}
+
+static int show_trace_cb(struct stackframe *frame, void *data)
+{
+ if (kernel_text_address(frame->pc))
+ pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+ return 0;
+}
+
+void show_trace(struct task_struct *task, unsigned long *sp)
+{
+ if (!sp)
+ sp = stack_pointer(task);
+
+ pr_info("Call Trace:\n");
+ walk_stackframe(sp, show_trace_cb, NULL);
+#ifndef CONFIG_KALLSYMS
+ pr_cont("\n");
+#endif
+}
+
+static int kstack_depth_to_print = 24;
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ int i = 0;
+ unsigned long *stack;
+
+ if (!sp)
+ sp = stack_pointer(task);
+ stack = sp;
+
+ pr_info("Stack:\n");
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(sp))
+ break;
+ pr_cont(" %08lx", *sp++);
+ if (i % 8 == 7)
+ pr_cont("\n");
+ }
+ show_trace(task, stack);
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ static int die_counter;
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+
+ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+ IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
+ show_regs(regs);
+ if (!user_mode(regs))
+ show_stack(NULL, (unsigned long*)regs->areg[1]);
+
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ do_exit(err);
+}
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 000000000..841503d33
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,791 @@
+/*
+ * arch/xtensa/kernel/vectors.S
+ *
+ * This file contains all exception vectors (user, kernel, and double),
+ * as well as the window vectors (overflow and underflow), and the debug
+ * vector. These are the primary vectors executed by the processor if an
+ * exception occurs.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+/*
+ * We use a two-level table approach. The user and kernel exception vectors
+ * use a first-level dispatch table to dispatch the exception to a registered
+ * fast handler or the default handler, if no fast handler was registered.
+ * The default handler sets up a C-stack and dispatches the exception to a
+ * registered C handler in the second-level dispatch table.
+ *
+ * Fast handler entry condition:
+ *
+ * a0: trashed, original value saved on stack (PT_AREG0)
+ * a1: a1
+ * a2: new stack pointer, original value in depc
+ * a3: dispatch table
+ * depc: a2, original value saved on stack (PT_DEPC)
+ * excsave_1: a3
+ *
+ * The value for PT_DEPC saved to stack also functions as a boolean to
+ * indicate that the exception is either a double or a regular exception:
+ *
+ * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
+ * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ *
+ * Note: Neither the kernel nor the user exception handler generates literals.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/vectors.h>
+
+#define WINDOW_VECTORS_SIZE 0x180
+
+
+/*
+ * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
+ *
+ * We get here when an exception occurred while we were in userland.
+ * We switch to the kernel stack and jump to the first level handler
+ * associated to the exception cause.
+ *
+ * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
+ * decremented by PT_USER_SIZE.
+ */
+
+ .section .UserExceptionVector.text, "ax"
+
+ENTRY(_UserExceptionVector)
+
+ xsr a3, excsave1 # save a3 and get dispatch table
+ wsr a2, depc # save a2
+ l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, exccause # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ xsr a3, excsave1 # restore a3 and dispatch table
+ jx a0
+
+ENDPROC(_UserExceptionVector)
+
+/*
+ * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
+ *
+ * We get this exception when we were already in kernel space.
+ * We decrement the current stack pointer (kernel) by PT_SIZE and
+ * jump to the first-level handler associated with the exception cause.
+ *
+ * Note: we need to preserve space for the spill region.
+ */
+
+ .section .KernelExceptionVector.text, "ax"
+
+ENTRY(_KernelExceptionVector)
+
+ xsr a3, excsave1 # save a3, and get dispatch table
+ wsr a2, depc # save a2
+ addi a2, a1, -16-PT_SIZE # adjust stack pointer
+ s32i a0, a2, PT_AREG0 # save a0 to ESF
+ rsr a0, exccause # retrieve exception cause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
+ xsr a3, excsave1 # restore a3 and dispatch table
+ jx a0
+
+ENDPROC(_KernelExceptionVector)
+
+/*
+ * Double exception vector (Exceptions with PS.EXCM == 1)
+ * We get this exception when another exception occurs while we are
+ * already in an exception, such as window overflow/underflow exception,
+ * or 'expected' exceptions, for example memory exception when we were trying
+ * to read data from an invalid address in user space.
+ *
+ * Note that this vector is never invoked for level-1 interrupts, because such
+ * interrupts are disabled (masked) when PS.EXCM is set.
+ *
+ * We decode the exception and take the appropriate action. However, the
+ * double exception vector is much more careful, because a lot more error
+ * cases go through the double exception vector than through the user and
+ * kernel exception vectors.
+ *
+ * Occasionally, the kernel expects a double exception to occur. This usually
+ * happens when accessing user-space memory with the user's permissions
+ * (l32e/s32e instructions). The kernel state, though, is not always suitable
+ * for immediate transfer of control to handle_double, where "normal" exception
+ * processing occurs. Also in kernel mode, TLB misses can occur if accessing
+ * vmalloc memory, possibly requiring repair in a double exception handler.
+ *
+ * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
+ * a boolean variable and a pointer to a fixup routine. If the variable
+ * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
+ * zero indicates to use the default kernel/user exception handler.
+ * There is only one exception, when the value is identical to the exc_table
+ * label, the kernel is in trouble. This mechanism is used to protect critical
+ * sections, mainly when the handler writes to the stack to assert the stack
+ * pointer is valid. Once the fixup/default handler leaves that area, the
+ * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
+ *
+ * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
+ * nonzero address of a fixup routine before executing anything that could
+ * cause a double exception, and reset it before returning.
+ *
+ * Some other things to take care of when a fast exception handler doesn't
+ * specify a particular fixup handler but wants to use the default handlers:
+ *
+ * - The original stack pointer (in a1) must not be modified. The fast
+ * exception handler should only use a2 as the stack pointer.
+ *
+ * - If the fast handler manipulates the stack pointer (in a2), it has to
+ * register a valid fixup handler and cannot use the default handlers.
+ *
+ * - The handler can use any other generic register from a3 to a15, but it
+ * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
+ *
+ * - These registers must be saved before a double exception can occur.
+ *
+ * - If we ever implement handling signals while in double exceptions, the
+ * number of registers a fast handler has saved (excluding a0 and a1) must
+ * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. )
+ *
+ * The fixup handlers are special handlers:
+ *
+ * - Fixup entry conditions differ from regular exceptions:
+ *
+ * a0: DEPC
+ * a1: a1
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable
+ * depc: a0
+ * excsave_1: a3
+ *
+ * - When the kernel enters the fixup handler, it still assumes it is in a
+ * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
+ * The fixup handler, therefore, has to re-register itself as the fixup
+ * handler before it returns from the double exception.
+ *
+ * - Fixup handler can share the same exception frame with the fast handler.
+ * The kernel stack pointer is not changed when entering the fixup handler.
+ *
+ * - Fixup handlers can jump to the default kernel and user exception
+ *   handlers. Before jumping, though, a fixup handler has to set up an
+ *   exception frame on the stack. Because the default handler resets the
+ *   registered fixup handler, the fixup handler must make sure that the
+ *   default handler returns to it instead of the exception address, so it
+ *   can re-register itself as the fixup handler.
+ *
+ * In case of a critical condition where the kernel cannot recover, we jump
+ * to unrecoverable_exception with the following entry conditions.
+ * All registers a0...a15 are unchanged from the last exception, except:
+ *
+ * a0: last address before we jumped to the unrecoverable_exception.
+ * excsave_1: a0
+ *
+ *
+ * See the handle_alloca_user and spill_registers routines for example clients.
+ *
+ * FIXME: Note: we currently don't allow signal handling coming from a double
+ * exception, so the item marked with (*) is not required.
+ */
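+
+/*
+ * Illustrative sketch of how a fast handler arms a fixup routine (an
+ * assumption for exposition only, not code from this file; see the real
+ * clients named above):
+ *
+ *	movi	a0, my_fixup			# hypothetical fixup routine
+ *	s32i	a0, a3, EXC_TABLE_FIXUP		# arm it (a3 = exc_table)
+ *	...					# code that may double-fault
+ *	movi	a0, 0
+ *	s32i	a0, a3, EXC_TABLE_FIXUP		# disarm before returning
+ */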
+
+ .section .DoubleExceptionVector.text, "ax"
+
+ENTRY(_DoubleExceptionVector)
+
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+
+ /* Check for kernel double exception (usually fatal). */
+
+ rsr a2, ps
+ _bbsi.l a2, PS_UM_BIT, 1f
+ j .Lksp
+
+ .align 4
+ .literal_position
+1:
+ /* Check if we are currently handling a window exception. */
+ /* Note: We don't need to indicate that we enter a critical section. */
+
+ xsr a0, depc # get DEPC, save a0
+
+ movi a2, WINDOW_VECTORS_VADDR
+ _bltu a0, a2, .Lfixup
+ addi a2, a2, WINDOW_VECTORS_SIZE
+ _bgeu a0, a2, .Lfixup
+
+ /* Window overflow/underflow exception. Get stack pointer. */
+
+ l32i a2, a3, EXC_TABLE_KSTK
+
+ /* Check for overflow/underflow exception, jump if overflow. */
+
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
+
+ /*
+ * Restart window underflow exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = excsave_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * underflow exception. Therefore, we change window base to the value
+ * before we entered the window underflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing depc (in a0).
+ * Note: We can trash the current window frame (a0...a3) and depc!
+ */
+_DoubleExceptionVector_WindowUnderflow:
+ xsr a3, excsave1
+ wsr a2, depc # save stack pointer temporarily
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ wsr a0, windowbase
+ rsync
+
+ /* We are now in the previous window frame. Save registers again. */
+
+ xsr a2, depc # save a2 and get stack pointer
+ s32i a0, a2, PT_AREG0
+ xsr a3, excsave1
+ rsr a0, exccause
+ s32i a0, a2, PT_DEPC # mark it as a regular exception
+ addx4 a0, a0, a3
+ xsr a3, excsave1
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+ /*
+ * We only allow the ITLB miss exception if we are in kernel space.
+ * All other exceptions are unexpected and thus unrecoverable!
+ */
+
+#ifdef CONFIG_MMU
+ .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+ rsr a3, exccause
+ beqi a3, EXCCAUSE_ITLB_MISS, 1f
+ addi a3, a3, -EXCCAUSE_DTLB_MISS
+ bnez a3, .Lunrecoverable
+1: movi a3, fast_second_level_miss_double_kernel
+ jx a3
+#else
+.equ .Lksp, .Lunrecoverable
+#endif
+
+ /* Critical! We can't handle this situation. PANIC! */
+
+ .extern unrecoverable_exception
+
+.Lunrecoverable_fixup:
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+
+.Lunrecoverable:
+ rsr a3, excsave1
+ wsr a0, excsave1
+ call0 unrecoverable_exception
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
+
+ /* Enter critical section. */
+
+ l32i a2, a3, EXC_TABLE_FIXUP
+ s32i a3, a3, EXC_TABLE_FIXUP
+ beq a2, a3, .Lunrecoverable_fixup # critical section
+ beqz a2, .Ldflt # no handler was registered
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
+
+ jx a2
+
+.Ldflt: /* Get stack pointer. */
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ addi a2, a2, -PT_USER_SIZE
+
+ /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
+
+ s32i a0, a2, PT_DEPC
+ l32i a0, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+ s32i a0, a2, PT_AREG0
+
+ /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
+
+ rsr a0, exccause
+ addx4 a0, a0, a3
+ xsr a3, excsave1
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ jx a0
+
+ /*
+ * Restart window OVERFLOW exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = EXCSAVE_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * overflow exception. Therefore, we change window base to the value
+ * before we entered the window overflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing DEPC (in a0).
+ *
+ * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+ * can clobber depc.
+ *
+ * The tricky part here is that overflow8 and overflow12 handlers
+ * save a0, then clobber a0. To restart the handler, we have to restore
+ * a0 if the double exception was past the point where a0 was clobbered.
+ *
+ * To keep things simple, we take advantage of the fact that all overflow
+ * handlers save a0 in their very first instruction. If DEPC was past
+ * that instruction, we can safely restore a0 from where it was saved
+ * on the stack.
+ *
+ * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
+ */
+_DoubleExceptionVector_WindowOverflow:
+ extui a2, a0, 0, 6 # get offset into 64-byte vector handler
+ beqz a2, 1f # if at start of vector, don't restore
+
+ addi a0, a0, -128
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
+
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ bbsi.l a0, 7, 2f
+
+ /*
+ * Restore a0 as saved by _WindowOverflow8().
+ */
+
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
+
+2:
+ /*
+ * Restore a0 as saved by _WindowOverflow12().
+ */
+
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+1:
+ /*
+ * Restore WindowBase while leaving all address registers restored.
+ * We have to use ROTW for this, because WSR.WINDOWBASE requires
+ * an address register (which would prevent restore).
+ *
+ * Window Base goes from 0 ... 7 (modulo 8)
+ * Window Start is 8 bits; e.g. 0b0101 0101 = 0x55 from a series of call8s
+ */
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ beqi a0, 1, .L1pane
+ beqi a0, 3, .L3pane
+
+ rsr a0, depc
+ rotw -2
+
+ /*
+ * We are now in the user code's original window frame.
+ * Process the exception as a user exception as if it was
+ * taken by the user code.
+ *
+ * This is similar to the user exception vector,
+ * except that PT_DEPC isn't set to EXCCAUSE.
+ */
+1:
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+ rsr a0, exccause
+
+ s32i a0, a2, PT_DEPC
+
+_DoubleExceptionVector_handle_exception:
+ addi a0, a0, -EXCCAUSE_UNALIGNED
+ beqz a0, 2f
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED
+ xsr a3, excsave1
+ jx a0
+2:
+ movi a0, user_exception
+ xsr a3, excsave1
+ jx a0
+
+.L1pane:
+ rsr a0, depc
+ rotw -1
+ j 1b
+
+.L3pane:
+ rsr a0, depc
+ rotw -3
+ j 1b
+
+ENDPROC(_DoubleExceptionVector)
+
+ .text
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up the exception stack to return to the appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ .literal_position
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+	/* We need to preserve the a0 value here to be able to handle an
+	 * exception that may occur on the a0 reload from stack. It may
+	 * happen because the TLB miss handler may not be atomic and the
+	 * pointer to the page table may be lost before we get here. There
+	 * are no free registers, so we need to use the EXC_TABLE_DOUBLE_SAVE
+	 * area.
+	 */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+/*
+ * Debug interrupt vector
+ *
+ * There is not much space here, so simply jump to another handler.
+ * EXCSAVE[DEBUGLEVEL] has been set to that handler.
+ */
+
+ .section .DebugInterruptVector.text, "ax"
+
+ENTRY(_DebugInterruptVector)
+
+ xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+ s32i a0, a3, DT_DEBUG_SAVE
+ l32i a0, a3, DT_DEBUG_EXCEPTION
+ jx a0
+
+ENDPROC(_DebugInterruptVector)
+
+
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each vector takes less than 16 (0x10) bytes and uses no literals; the
+ * extra 8 bytes that would otherwise be required are placed in the window
+ * vectors area, where there is space. With relocatable vectors, all
+ * vectors are within a ~4 kB range of each other, so we can simply jump
+ * (J) to another vector without having to use JX.
+ *
+ * The common_exception code gets the current IRQ level in PS.INTLEVEL
+ * and preserves it for the duration of IRQ handling.
+ */
+
+ .macro irq_entry_level level
+
+ .if XCHAL_EXCM_LEVEL >= \level
+ .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+ wsr a0, excsave2
+ rsr a0, epc\level
+ wsr a0, epc1
+ .if \level <= LOCKLEVEL
+ movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ .else
+ movi a0, EXCCAUSE_MAPPED_NMI
+ .endif
+ wsr a0, exccause
+ rsr a0, eps\level
+ # branch to user or kernel vector
+ j _SimulateUserKernelVectorException
+ .endif
+
+ .endm
+
+ irq_entry_level 2
+ irq_entry_level 3
+ irq_entry_level 4
+ irq_entry_level 5
+ irq_entry_level 6
+
+
+/* Window overflow and underflow handlers.
+ * The handlers must be 64 bytes apart, first starting with the underflow
+ * handlers underflow-4 to underflow-12, then the overflow handlers
+ * overflow-4 to overflow-12.
+ *
+ * Note: We rerun the underflow handlers if we hit an exception, so
+ * we try to access any page that would cause a page fault early.
+ */
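+
+/* Orientation note (restating what the code below does): each overflow
+ * handler spills a0..a3 of the window being evicted to the 16-byte base
+ * save area addressed through a5/a9/a13, and overflow8/overflow12 spill
+ * the remaining registers to the extra save area addressed through the
+ * stack pointer reloaded from the evicted frame (the "l32e a0, a1, -12"
+ * below).
+ */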
+
+#define ENTRY_ALIGN64(name) \
+ .globl name; \
+ .align 64; \
+ name:
+
+ .section .WindowVectors.text, "ax"
+
+
+/* 4-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow4)
+
+ s32e a0, a5, -16
+ s32e a1, a5, -12
+ s32e a2, a5, -8
+ s32e a3, a5, -4
+ rfwo
+
+ENDPROC(_WindowOverflow4)
+
+
+#if XCHAL_EXCM_LEVEL >= 2
+ /* Not a window vector - but a convenient location
+ * (where we know there's space) for continuation of
+ * medium priority interrupt dispatch code.
+	 * On entry here, a0 contains the interrupted PS (read from EPS),
+	 * and EXCSAVE2 contains the saved a0:
+ */
+ .align 4
+_SimulateUserKernelVectorException:
+ addi a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
+ wsr a0, ps
+#endif
+ bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
+ xsr a0, excsave2 # restore a0
+ j _KernelExceptionVector # simulate kernel vector exception
+1: xsr a0, excsave2 # restore a0
+ j _UserExceptionVector # simulate user vector exception
+#endif
+
+
+/* 4-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow4)
+
+ l32e a0, a5, -16
+ l32e a1, a5, -12
+ l32e a2, a5, -8
+ l32e a3, a5, -4
+ rfwu
+
+ENDPROC(_WindowUnderflow4)
+
+/* 8-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow8)
+
+ s32e a0, a9, -16
+ l32e a0, a1, -12
+ s32e a2, a9, -8
+ s32e a1, a9, -12
+ s32e a3, a9, -4
+ s32e a4, a0, -32
+ s32e a5, a0, -28
+ s32e a6, a0, -24
+ s32e a7, a0, -20
+ rfwo
+
+ENDPROC(_WindowOverflow8)
+
+/* 8-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow8)
+
+ l32e a1, a9, -12
+ l32e a0, a9, -16
+ l32e a7, a1, -12
+ l32e a2, a9, -8
+ l32e a4, a7, -32
+ l32e a3, a9, -4
+ l32e a5, a7, -28
+ l32e a6, a7, -24
+ l32e a7, a7, -20
+ rfwu
+
+ENDPROC(_WindowUnderflow8)
+
+/* 12-Register Window Overflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowOverflow12)
+
+ s32e a0, a13, -16
+ l32e a0, a1, -12
+ s32e a1, a13, -12
+ s32e a2, a13, -8
+ s32e a3, a13, -4
+ s32e a4, a0, -48
+ s32e a5, a0, -44
+ s32e a6, a0, -40
+ s32e a7, a0, -36
+ s32e a8, a0, -32
+ s32e a9, a0, -28
+ s32e a10, a0, -24
+ s32e a11, a0, -20
+ rfwo
+
+ENDPROC(_WindowOverflow12)
+
+/* 12-Register Window Underflow Vector (Handler) */
+
+ENTRY_ALIGN64(_WindowUnderflow12)
+
+ l32e a1, a13, -12
+ l32e a0, a13, -16
+ l32e a11, a1, -12
+ l32e a2, a13, -8
+ l32e a4, a11, -48
+ l32e a8, a11, -32
+ l32e a3, a13, -4
+ l32e a5, a11, -44
+ l32e a6, a11, -40
+ l32e a7, a11, -36
+ l32e a9, a11, -28
+ l32e a10, a11, -24
+ l32e a11, a11, -20
+ rfwu
+
+ENDPROC(_WindowUnderflow12)
+
+ .text
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..fa926995d
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,336 @@
+/*
+ * arch/xtensa/kernel/vmlinux.lds.S
+ *
+ * Xtensa linker script
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#include <asm/vectors.h>
+#include <variant/core.h>
+
+OUTPUT_ARCH(xtensa)
+ENTRY(_start)
+
+#ifdef __XTENSA_EB__
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
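+
+/* jiffies aliases the low 32 bits of jiffies_64, which on big-endian
+ * (__XTENSA_EB__) cores live 4 bytes into the 64-bit value. */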
+
+/* Note: In the following macros, it would be nice to specify only the
+ vector name and section kind and construct "sym" and "section" using
+ CPP concatenation, but that does not work reliably. Concatenating a
+ string with "." produces an invalid token. CPP will not print a
+ warning because it thinks this is an assembly file, but it leaves
+ them as multiple tokens and there may or may not be whitespace
+ between them. */
+
+/* Macro for a relocation entry */
+
+#define RELOCATE_ENTRY(sym, section) \
+ LONG(sym ## _start); \
+ LONG(sym ## _end); \
+ LONG(LOADADDR(section))
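+
+/*
+ * Each entry emits a (start, end, load-address) triple. As a sketch of
+ * the boot-time consumer (an assumption mirroring the head.S behaviour
+ * described below, not code from this script): for each triple between
+ * __boot_reloc_table_start and __boot_reloc_table_end, head.S copies
+ * (end - start) bytes from the load address to start.
+ */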
+
+/*
+ * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
+ * defined, the code for every vector is located with other init data. At
+ * startup time head.S copies the code for every vector to its final
+ * position according to the description recorded in the corresponding
+ * RELOCATE_ENTRY.
+ */
+
+#ifdef CONFIG_VECTORS_OFFSET
+#define SECTION_VECTOR(sym, section, addr, prevsec) \
+ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
+ { \
+ . = ALIGN(4); \
+ sym ## _start = ABSOLUTE(.); \
+ *(section) \
+ sym ## _end = ABSOLUTE(.); \
+ }
+#else
+#define SECTION_VECTOR(section, addr) \
+ . = addr; \
+ *(section)
+#endif
+
+/*
+ * Mapping of input sections to output sections when linking.
+ */
+
+SECTIONS
+{
+ . = KERNELOFFSET;
+ /* .text section */
+
+ _text = .;
+ _stext = .;
+
+ .text :
+ {
+ /* The HEAD_TEXT section must be the first section! */
+ HEAD_TEXT
+
+#ifndef CONFIG_VECTORS_OFFSET
+ . = ALIGN(PAGE_SIZE);
+ _vecbase = .;
+
+ SECTION_VECTOR (.WindowVectors.text, WINDOW_VECTORS_VADDR)
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR (.Level3InterruptVector.text, INTLEVEL3_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR (.Level4InterruptVector.text, INTLEVEL4_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR (.Level5InterruptVector.text, INTLEVEL5_VECTOR_VADDR)
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
+#endif
+ SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
+ SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
+ SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
+ SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
+#endif
+
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+
+ }
+ _etext = .;
+ PROVIDE (etext = .);
+
+ . = ALIGN(16);
+
+ RODATA
+
+ /* Relocation table */
+
+ .fixup : { *(.fixup) }
+
+ EXCEPTION_TABLE(16)
+ NOTES
+ /* Data section */
+
+ _sdata = .;
+ RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ /* Initialization code and data: */
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ .init.data :
+ {
+ INIT_DATA
+ . = ALIGN(0x4);
+ __tagtable_begin = .;
+ *(.taglist)
+ __tagtable_end = .;
+
+ . = ALIGN(16);
+ __boot_reloc_table_start = ABSOLUTE(.);
+
+#ifdef CONFIG_VECTORS_OFFSET
+ RELOCATE_ENTRY(_WindowVectors_text,
+ .WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+ RELOCATE_ENTRY(_Level2InterruptVector_text,
+ .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ RELOCATE_ENTRY(_Level3InterruptVector_text,
+ .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ RELOCATE_ENTRY(_Level4InterruptVector_text,
+ .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ RELOCATE_ENTRY(_Level5InterruptVector_text,
+ .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ RELOCATE_ENTRY(_Level6InterruptVector_text,
+ .Level6InterruptVector.text);
+#endif
+ RELOCATE_ENTRY(_KernelExceptionVector_text,
+ .KernelExceptionVector.text);
+ RELOCATE_ENTRY(_UserExceptionVector_text,
+ .UserExceptionVector.text);
+ RELOCATE_ENTRY(_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text);
+ RELOCATE_ENTRY(_DebugInterruptVector_text,
+ .DebugInterruptVector.text);
+#endif
+#if defined(CONFIG_SMP)
+ RELOCATE_ENTRY(_SecondaryResetVector_text,
+ .SecondaryResetVector.text);
+#endif
+
+
+ __boot_reloc_table_end = ABSOLUTE(.) ;
+
+ INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+ }
+
+ PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
+
+	/*
+	 * We need this dummy segment here: it anchors the AT() load-address
+	 * chain by serving as "prevsec" for the first relocated vector
+	 * section below.
+	 */
+
+ . = ALIGN(4);
+ .dummy : { LONG(0) }
+
+#ifdef CONFIG_VECTORS_OFFSET
+ /* The vectors are relocated to the real position at startup time */
+
+ SECTION_VECTOR (_WindowVectors_text,
+ .WindowVectors.text,
+ WINDOW_VECTORS_VADDR,
+ .dummy)
+ SECTION_VECTOR (_DebugInterruptVector_text,
+ .DebugInterruptVector.text,
+ DEBUG_VECTOR_VADDR,
+ .WindowVectors.text)
+#undef LAST
+#define LAST .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR (_Level2InterruptVector_text,
+ .Level2InterruptVector.text,
+ INTLEVEL2_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR (_Level3InterruptVector_text,
+ .Level3InterruptVector.text,
+ INTLEVEL3_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR (_Level4InterruptVector_text,
+ .Level4InterruptVector.text,
+ INTLEVEL4_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR (_Level5InterruptVector_text,
+ .Level5InterruptVector.text,
+ INTLEVEL5_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR (_Level6InterruptVector_text,
+ .Level6InterruptVector.text,
+ INTLEVEL6_VECTOR_VADDR,
+ LAST)
+# undef LAST
+# define LAST .Level6InterruptVector.text
+#endif
+ SECTION_VECTOR (_KernelExceptionVector_text,
+ .KernelExceptionVector.text,
+ KERNEL_VECTOR_VADDR,
+ LAST)
+#undef LAST
+ SECTION_VECTOR (_UserExceptionVector_text,
+ .UserExceptionVector.text,
+ USER_VECTOR_VADDR,
+ .KernelExceptionVector.text)
+ SECTION_VECTOR (_DoubleExceptionVector_text,
+ .DoubleExceptionVector.text,
+ DOUBLEEXC_VECTOR_VADDR,
+ .UserExceptionVector.text)
+
+ . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+
+#endif
+#if defined(CONFIG_SMP)
+
+ SECTION_VECTOR (_SecondaryResetVector_text,
+ .SecondaryResetVector.text,
+ RESET_VECTOR1_VADDR,
+ .DoubleExceptionVector.text)
+
+ . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+
+#endif
+
+ . = ALIGN(PAGE_SIZE);
+
+ __init_end = .;
+
+ BSS_SECTION(0, 8192, 0)
+
+ _end = .;
+
+ .xt.lit : { *(.xt.lit) }
+ .xt.prop : { *(.xt.prop) }
+
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+
+ .xt.insn 0 :
+ {
+ *(.xt.insn)
+ *(.gnu.linkonce.x*)
+ }
+
+ .xt.lit 0 :
+ {
+ *(.xt.lit)
+ *(.gnu.linkonce.p*)
+ }
+
+ /* Sections to be discarded */
+ DISCARDS
+}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 000000000..24cf6972e
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,132 @@
+/*
+ * arch/xtensa/kernel/xtensa_ksyms.c
+ *
+ * Export Xtensa-specific functions for loadable modules.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/in6.h>
+
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/ftrace.h>
+#ifdef CONFIG_BLK_DEV_FD
+#include <asm/floppy.h>
+#endif
+#ifdef CONFIG_NET
+#include <net/checksum.h>
+#endif /* CONFIG_NET */
+
+
+/*
+ * String functions
+ */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+EXPORT_SYMBOL(__strncpy_user);
+#endif
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * gcc internal math functions
+ */
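+/*
+ * These are libgcc helpers: gcc emits calls to them for 64-bit shifts,
+ * multiplies, divides, and comparisons on this 32-bit target, so they
+ * must be exported for modules.
+ */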
+extern long long __ashrdi3(long long, int);
+extern long long __ashldi3(long long, int);
+extern long long __lshrdi3(long long, int);
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern long long __muldi3(long long, long long);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
+extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
+extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+extern int __ucmpdi2(unsigned long long, unsigned long long);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__ucmpdi2);
+
+void __xtensa_libgcc_window_spill(void)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
+
+unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_and_4);
+
+unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_or_4);
+
+/*
+ * Networking support
+ */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+
+/*
+ * Architecture-specific symbols
+ */
+EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
+
+/*
+ * Kernel hacking ...
+ */
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+// FIXME EXPORT_SYMBOL(screen_info);
+#endif
+
+extern long common_exception_return;
+EXPORT_SYMBOL(common_exception_return);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
new file mode 100644
index 000000000..6c4fdd86a
--- /dev/null
+++ b/arch/xtensa/lib/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xtensa-specific library files.
+#
+
+lib-y += memcopy.o memset.o checksum.o \
+ usercopy.o strncpy_user.o strnlen_user.o
+lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
new file mode 100644
index 000000000..528fe0dd9
--- /dev/null
+++ b/arch/xtensa/lib/checksum.S
@@ -0,0 +1,398 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * IP/TCP/UDP checksumming routines
+ *
+ * Xtensa version: Copyright (C) 2001 Tensilica, Inc. by Kevin Chea
+ * Optimized by Joe Taylor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+/*
+ * unsigned int csum_partial(const unsigned char *buf, int len,
+ * unsigned int sum);
+ * a2 = buf
+ * a3 = len
+ * a4 = sum
+ *
+ * This function is optimized for 2- or 4-byte aligned buffers; odd
+ * alignment is handled, but only by a much slower byte-wise path below.
+ */
+
+/* ONES_ADD converts twos-complement math to ones-complement. */
+#define ONES_ADD(sum, val) \
+ add sum, sum, val ; \
+ bgeu sum, val, 99f ; \
+ addi sum, sum, 1 ; \
+99: ;
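+
+/*
+ * Equivalent C sketch of ONES_ADD (illustrative only): the end-around
+ * carry of one's-complement addition is folded back in whenever the
+ * 32-bit add wraps:
+ *
+ *	sum += val;
+ *	if (sum < val)		// the unsigned add wrapped
+ *		sum += 1;	// fold the carry back in
+ */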
+
+.text
+ENTRY(csum_partial)
+
+ /*
+ * Experiments with Ethernet and SLIP connections show that buf
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
+ entry sp, 32
+ extui a5, a2, 0, 2
+ bnez a5, 8f /* branch if 2-byte aligned */
+ /* Fall-through on common case, 4-byte alignment */
+1:
+ srli a5, a3, 5 /* 32-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 2f
+#else
+ beqz a5, 2f
+ slli a5, a5, 5
+ add a5, a5, a2 /* a5 = end of last 32-byte chunk */
+.Loop1:
+#endif
+ l32i a6, a2, 0
+ l32i a7, a2, 4
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 8
+ l32i a7, a2, 12
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 16
+ l32i a7, a2, 20
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ l32i a6, a2, 24
+ l32i a7, a2, 28
+ ONES_ADD(a4, a6)
+ ONES_ADD(a4, a7)
+ addi a2, a2, 4*8
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop1
+#endif
+2:
+ extui a5, a3, 2, 3 /* remaining 4-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 3f
+#else
+ beqz a5, 3f
+ slli a5, a5, 2
+ add a5, a5, a2 /* a5 = end of last 4-byte chunk */
+.Loop2:
+#endif
+ l32i a6, a2, 0
+ ONES_ADD(a4, a6)
+ addi a2, a2, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop2
+#endif
+3:
+ _bbci.l a3, 1, 5f /* remaining 2-byte chunk */
+ l16ui a6, a2, 0
+ ONES_ADD(a4, a6)
+ addi a2, a2, 2
+5:
+ _bbci.l a3, 0, 7f /* remaining 1-byte chunk */
+6: l8ui a6, a2, 0
+#ifdef __XTENSA_EB__
+ slli a6, a6, 8 /* load byte into bits 8..15 */
+#endif
+ ONES_ADD(a4, a6)
+7:
+ mov a2, a4
+ retw
+
+ /* uncommon case, buf is 2-byte aligned */
+8:
+ beqz a3, 7b /* branch if len == 0 */
+ beqi a3, 1, 6b /* branch if len == 1 */
+
+ extui a5, a2, 0, 1
+ bnez a5, 8f /* branch if 1-byte aligned */
+
+ l16ui a6, a2, 0 /* common case, len >= 2 */
+ ONES_ADD(a4, a6)
+ addi a2, a2, 2 /* adjust buf */
+ addi a3, a3, -2 /* adjust len */
+ j 1b /* now buf is 4-byte aligned */
+
+ /* case: odd-byte aligned, len > 1
+ * This case is dog slow, so don't give us an odd address.
+ * (I don't think this ever happens, but just in case.)
+ */
+8:
+ srli a5, a3, 2 /* 4-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a5, 2f
+#else
+ beqz a5, 2f
+ slli a5, a5, 2
+ add a5, a5, a2 /* a5 = end of last 4-byte chunk */
+.Loop3:
+#endif
+ l8ui a6, a2, 0 /* bits 24..31 */
+ l16ui a7, a2, 1 /* bits 8..23 */
+	l8ui	a8, a2, 3	/* bits 0.. 7 */
+#ifdef __XTENSA_EB__
+ slli a6, a6, 24
+#else
+ slli a8, a8, 24
+#endif
+ slli a7, a7, 8
+ or a7, a7, a6
+ or a7, a7, a8
+ ONES_ADD(a4, a7)
+ addi a2, a2, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a5, .Loop3
+#endif
+2:
+ _bbci.l a3, 1, 3f /* remaining 2-byte chunk, still odd addr */
+ l8ui a6, a2, 0
+ l8ui a7, a2, 1
+#ifdef __XTENSA_EB__
+ slli a6, a6, 8
+#else
+ slli a7, a7, 8
+#endif
+ or a7, a7, a6
+ ONES_ADD(a4, a7)
+ addi a2, a2, 2
+3:
+ j 5b /* branch to handle the remaining byte */
+
+ENDPROC(csum_partial)
+
+/*
+ * Copy from ds while checksumming, otherwise like csum_partial
+ */
+
+/*
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
+ int sum, int *src_err_ptr, int *dst_err_ptr)
+ a2 = src
+ a3 = dst
+ a4 = len
+ a5 = sum
+ a6 = src_err_ptr
+ a7 = dst_err_ptr
+ a8 = temp
+ a9 = temp
+ a10 = temp
+ a11 = original len for exception handling
+ a12 = original dst for exception handling
+
+ This function is optimized for 4-byte aligned addresses. Other
+ alignments work, but not nearly as efficiently.
+ */
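+
+/* On a faulting access, the EX() annotations below transfer control to
+ * the fixup handlers at labels 10 and 11 at the end of this file, which
+ * store -EFAULT through src_err_ptr or dst_err_ptr respectively. */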
+
+ENTRY(csum_partial_copy_generic)
+
+ entry sp, 32
+ mov a12, a3
+ mov a11, a4
+ or a10, a2, a3
+
+ /* We optimize the following alignment tests for the 4-byte
+ aligned case. Two bbsi.l instructions might seem more optimal
+ (commented out below). However, both labels 5: and 3: are out
+ of the imm8 range, so the assembler relaxes them into
+ equivalent bbci.l, j combinations, which is actually
+ slower. */
+
+ extui a9, a10, 0, 2
+ beqz a9, 1f /* branch if both are 4-byte aligned */
+ bbsi.l a10, 0, 5f /* branch if one address is odd */
+ j 3f /* one address is 2-byte aligned */
+
+/* _bbsi.l a10, 0, 5f */ /* branch if odd address */
+/* _bbsi.l a10, 1, 3f */ /* branch if 2-byte-aligned address */
+
+1:
+ /* src and dst are both 4-byte aligned */
+ srli a10, a4, 5 /* 32-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 2f
+#else
+ beqz a10, 2f
+ slli a10, a10, 5
+ add a10, a10, a2 /* a10 = end of last 32-byte src chunk */
+.Loop5:
+#endif
+EX(10f) l32i a9, a2, 0
+EX(10f) l32i a8, a2, 4
+EX(11f) s32i a9, a3, 0
+EX(11f) s32i a8, a3, 4
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+EX(10f) l32i a9, a2, 8
+EX(10f) l32i a8, a2, 12
+EX(11f) s32i a9, a3, 8
+EX(11f) s32i a8, a3, 12
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+EX(10f) l32i a9, a2, 16
+EX(10f) l32i a8, a2, 20
+EX(11f) s32i a9, a3, 16
+EX(11f) s32i a8, a3, 20
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+EX(10f) l32i a9, a2, 24
+EX(10f) l32i a8, a2, 28
+EX(11f) s32i a9, a3, 24
+EX(11f) s32i a8, a3, 28
+ ONES_ADD(a5, a9)
+ ONES_ADD(a5, a8)
+ addi a2, a2, 32
+ addi a3, a3, 32
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop5
+#endif
+2:
+ extui a10, a4, 2, 3 /* remaining 4-byte chunks */
+ extui a4, a4, 0, 2 /* reset len for general-case, 2-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 3f
+#else
+ beqz a10, 3f
+ slli a10, a10, 2
+ add a10, a10, a2 /* a10 = end of last 4-byte src chunk */
+.Loop6:
+#endif
+EX(10f) l32i a9, a2, 0
+EX(11f) s32i a9, a3, 0
+ ONES_ADD(a5, a9)
+ addi a2, a2, 4
+ addi a3, a3, 4
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop6
+#endif
+3:
+ /*
+	Control comes here in two cases: (1) It may fall through
+ to here from the 4-byte alignment case to process, at most,
+ one 2-byte chunk. (2) It branches to here from above if
+ either src or dst is 2-byte aligned, and we process all bytes
+ here, except for perhaps a trailing odd byte. It's
+ inefficient, so align your addresses to 4-byte boundaries.
+
+ a2 = src
+ a3 = dst
+ a4 = len
+ a5 = sum
+ */
+ srli a10, a4, 1 /* 2-byte chunks */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 4f
+#else
+ beqz a10, 4f
+ slli a10, a10, 1
+ add a10, a10, a2 /* a10 = end of last 2-byte src chunk */
+.Loop7:
+#endif
+EX(10f) l16ui a9, a2, 0
+EX(11f) s16i a9, a3, 0
+ ONES_ADD(a5, a9)
+ addi a2, a2, 2
+ addi a3, a3, 2
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop7
+#endif
+4:
+ /* This section processes a possible trailing odd byte. */
+ _bbci.l a4, 0, 8f /* 1-byte chunk */
+EX(10f) l8ui a9, a2, 0
+EX(11f) s8i a9, a3, 0
+#ifdef __XTENSA_EB__
+ slli a9, a9, 8 /* shift byte to bits 8..15 */
+#endif
+ ONES_ADD(a5, a9)
+8:
+ mov a2, a5
+ retw
+
+5:
+	/* Control branches to here when either src or dst is odd. We
+ process all bytes using 8-bit accesses. Grossly inefficient,
+ so don't feed us an odd address. */
+
+ srli a10, a4, 1 /* handle in pairs for 16-bit csum */
+#if XCHAL_HAVE_LOOPS
+ loopgtz a10, 6f
+#else
+ beqz a10, 6f
+ slli a10, a10, 1
+ add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */
+.Loop8:
+#endif
+EX(10f) l8ui a9, a2, 0
+EX(10f) l8ui a8, a2, 1
+EX(11f) s8i a9, a3, 0
+EX(11f) s8i a8, a3, 1
+#ifdef __XTENSA_EB__
+ slli a9, a9, 8 /* combine into a single 16-bit value */
+#else /* for checksum computation */
+ slli a8, a8, 8
+#endif
+ or a9, a9, a8
+ ONES_ADD(a5, a9)
+ addi a2, a2, 2
+ addi a3, a3, 2
+#if !XCHAL_HAVE_LOOPS
+ blt a2, a10, .Loop8
+#endif
+6:
+ j 4b /* process the possible trailing odd byte */
+
+ENDPROC(csum_partial_copy_generic)
+
+
+# Exception handler:
+.section .fixup, "ax"
+/*
+ a6 = src_err_ptr
+ a7 = dst_err_ptr
+ a11 = original len for exception handling
+ a12 = original dst for exception handling
+*/
+
+10:
+ _movi a2, -EFAULT
+ s32i a2, a6, 0 /* src_err_ptr */
+
+ # clear the complete destination - computing the rest
+ # is too much work
+ movi a2, 0
+#if XCHAL_HAVE_LOOPS
+ loopgtz a11, 2f
+#else
+ beqz a11, 2f
+ add a11, a11, a12 /* a11 = ending address */
+.Leloop:
+#endif
+ s8i a2, a12, 0
+ addi a12, a12, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a12, a11, .Leloop
+#endif
+2:
+ retw
+
+11:
+ movi a2, -EFAULT
+ s32i a2, a7, 0 /* dst_err_ptr */
+ movi a2, 0
+ retw
+
+.previous
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
new file mode 100644
index 000000000..c0f698171
--- /dev/null
+++ b/arch/xtensa/lib/memcopy.S
@@ -0,0 +1,553 @@
+/*
+ * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions
+ * xthal_memcpy and xthal_bcopy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 - 2012 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+/*
+ * void *memcpy(void *dst, const void *src, size_t len);
+ *
+ * This function is intended to do the same thing as the standard
+ * library function memcpy() for most cases.
+ * However, where the source and/or destination references
+ * an instruction RAM or ROM or a data RAM or ROM, that
+ * source and/or destination will always be accessed with
+ * 32-bit load and store instructions (as required for these
+ * types of devices).
+ *
+ * !!!!!!! XTFIXME:
+ * !!!!!!! Handling of IRAM/IROM has not yet
+ * !!!!!!! been implemented.
+ *
+ * The (general case) algorithm is as follows:
+ * If destination is unaligned, align it by conditionally
+ * copying 1 and 2 bytes.
+ * If source is aligned,
+ * do 16 bytes with a loop, and then finish up with
+ * 8, 4, 2, and 1 byte copies conditional on the length;
+ * else (if source is unaligned),
+ * do the same, but use SRC to align the source data.
+ * This code tries to use fall-through branches for the common
+ * case of aligned source and destination and multiple
+ * of 4 (or 8) length.
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ tmp
+ */
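+
+/*
+ * Rough C outline of the general case above (an illustrative sketch
+ * only; the assembly below is the actual implementation):
+ *
+ *	while (len && ((unsigned long)dst & 3))		// align dst first
+ *		*dst++ = *src++, len--;
+ *	if (((unsigned long)src & 3) == 0)
+ *		;	// 16-byte word loop, then 8/4/2/1-byte tails
+ *	else
+ *		;	// same loops, using SRC (funnel shift) to
+ *			// combine misaligned source words
+ */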
+
+ .text
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytecopydone
+ add a7, a3, a4 # a7 = end address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+ l8ui a6, a3, 0
+ addi a3, a3, 1
+ s8i a6, a5, 0
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytecopydone:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+ .align 4
+.Ldst1mod2: # dst is only byte aligned
+ _bltui a4, 7, .Lbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+ l8ui a6, a3, 0
+ addi a3, a3, 1
+ addi a4, a4, -1
+ s8i a6, a5, 0
+ addi a5, a5, 1
+ _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Ldst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ _bltui a4, 6, .Lbytecopy # do short copies byte by byte
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ addi a4, a4, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a5, a5, 2
+ j .Ldstaligned # dst is now aligned, return to main algorithm
+
+ENTRY(__memcpy)
+WEAK(memcpy)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+.Lcommon:
+ _bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
+ _bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
+.Ldstaligned: # return here from .Ldst?mod? once dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is not aligned,
+ _bany a3, a8, .Lsrcunaligned # then use shifting copy
+ /*
+ * Destination and source are word-aligned, use word copy.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a8, a7, 4
+ add a8, a8, a3 # a8 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ s32i a6, a5, 0
+ l32i a6, a3, 8
+ s32i a7, a5, 4
+ l32i a7, a3, 12
+ s32i a6, a5, 8
+ addi a3, a3, 16
+ s32i a7, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # copy 8 bytes
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ addi a3, a3, 8
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+ addi a5, a5, 8
+.L2:
+ bbsi.l a4, 2, .L3
+ bbsi.l a4, 1, .L4
+ bbsi.l a4, 0, .L5
+ retw
+.L3:
+ # copy 4 bytes
+ l32i a6, a3, 0
+ addi a3, a3, 4
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ bbsi.l a4, 1, .L4
+ bbsi.l a4, 0, .L5
+ retw
+.L4:
+ # copy 2 bytes
+ l16ui a6, a3, 0
+ addi a3, a3, 2
+ s16i a6, a5, 0
+ addi a5, a5, 2
+ bbsi.l a4, 0, .L5
+ retw
+.L5:
+ # copy 1 byte
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+.Lsrcunaligned:
+ _beqz a4, .Ldone # avoid loading anything for zero-length copies
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ __ssa8 a3 # set shift amount from byte offset
+
+/* set to 1 when running on ISS (simulator) with the
+ lint or ferret client, or 0 to save a few cycles */
+#define SIM_CHECKS_ALIGNMENT 1
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ and a11, a3, a8 # save unalignment offset for below
+ sub a3, a3, a11 # align a3
+#endif
+ l32i a6, a3, 0 # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop2done
+ slli a10, a7, 4
+ add a10, a10, a3 # a10 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2:
+ l32i a7, a3, 4
+ l32i a8, a3, 8
+ __src_b a6, a6, a7
+ s32i a6, a5, 0
+ l32i a9, a3, 12
+ __src_b a7, a7, a8
+ s32i a7, a5, 4
+ l32i a6, a3, 16
+ __src_b a8, a8, a9
+ s32i a8, a5, 8
+ addi a3, a3, 16
+ __src_b a9, a9, a6
+ s32i a9, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2done:
+ bbci.l a4, 3, .L12
+ # copy 8 bytes
+ l32i a7, a3, 4
+ l32i a8, a3, 8
+ __src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a3, a3, 8
+ __src_b a7, a7, a8
+ s32i a7, a5, 4
+ addi a5, a5, 8
+ mov a6, a8
+.L12:
+ bbci.l a4, 2, .L13
+ # copy 4 bytes
+ l32i a7, a3, 4
+ addi a3, a3, 4
+ __src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ mov a6, a7
+.L13:
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ add a3, a3, a11 # readjust a3 with correct misalignment
+#endif
+ bbsi.l a4, 1, .L14
+ bbsi.l a4, 0, .L15
+.Ldone: retw
+.L14:
+ # copy 2 bytes
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a5, a5, 2
+ bbsi.l a4, 0, .L15
+ retw
+.L15:
+ # copy 1 byte
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
+ENDPROC(__memcpy)
+
+/*
+ * void bcopy(const void *src, void *dest, size_t n);
+ */
+
+ENTRY(bcopy)
+
+ entry sp, 16 # minimal stack frame
+ # a2=src, a3=dst, a4=len
+ mov a5, a3
+ mov a3, a2
+ mov a2, a5
+ j .Lmovecommon # go to common code for memmove+bcopy
+
+ENDPROC(bcopy)
+
+/*
+ * void *memmove(void *dst, const void *src, size_t len);
+ *
+ * This function is intended to do the same thing as the standard
+ * library function memmove() for most cases.
+ * However, where the source and/or destination references
+ * an instruction RAM or ROM or a data RAM or ROM, that
+ * source and/or destination will always be accessed with
+ * 32-bit load and store instructions (as required for these
+ * types of devices).
+ *
+ * !!!!!!! XTFIXME:
+ * !!!!!!! Handling of IRAM/IROM has not yet
+ * !!!!!!! been implemented.
+ *
+ * The (general case) algorithm is as follows:
+ * If end of source doesn't overlap destination then use memcpy.
+ * Otherwise do memcpy backwards.
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ tmp
+ */
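+
+/*
+ * The overlap test at .Lmovecommon below, as an illustrative C sketch:
+ *
+ *	if ((unsigned long)dst - (unsigned long)src >= len)
+ *		;	// forward copy is safe, share the memcpy path
+ *	else
+ *		;	// dst overlaps the source's tail, copy backwards
+ *
+ * The unsigned subtraction also routes dst < src to the forward path.
+ */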
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbackbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbackbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbackbytecopydone
+ sub a7, a3, a4 # a7 = start address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbacknextbyte:
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a7, .Lbacknextbyte # continue loop if
+ # $a3:src != $a7:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbackbytecopydone:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+ .align 4
+.Lbackdst1mod2: # dst is only byte aligned
+ _bltui a4, 7, .Lbackbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+ addi a4, a4, -1
+ _bbci.l a5, 1, .Lbackdstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Lbackdst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ _bltui a4, 6, .Lbackbytecopy # do short copies byte by byte
+ addi a3, a3, -2
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a5, a5, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a4, a4, -2
+ j .Lbackdstaligned # dst is now aligned,
+ # return to main algorithm
+
+ENTRY(__memmove)
+WEAK(memmove)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+.Lmovecommon:
+ sub a6, a5, a3
+ bgeu a6, a4, .Lcommon
+
+ add a5, a5, a4
+ add a3, a3, a4
+
+ _bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2
+ _bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4
+.Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is not aligned,
+ _bany a3, a8, .Lbacksrcunaligned # then use shifting copy
+ /*
+ * Destination and source are word-aligned, use word copy.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .backLoop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .backLoop1done
+ slli a8, a7, 4
+ sub a8, a3, a8 # a8 = start of first 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop1:
+ addi a3, a3, -16
+ l32i a7, a3, 12
+ l32i a6, a3, 8
+ addi a5, a5, -16
+ s32i a7, a5, 12
+ l32i a7, a3, 4
+ s32i a6, a5, 8
+ l32i a6, a3, 0
+ s32i a7, a5, 4
+ s32i a6, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop1done:
+ bbci.l a4, 3, .Lback2
+ # copy 8 bytes
+ addi a3, a3, -8
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ addi a5, a5, -8
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+.Lback2:
+ bbsi.l a4, 2, .Lback3
+ bbsi.l a4, 1, .Lback4
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback3:
+ # copy 4 bytes
+ addi a3, a3, -4
+ l32i a6, a3, 0
+ addi a5, a5, -4
+ s32i a6, a5, 0
+ bbsi.l a4, 1, .Lback4
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback4:
+ # copy 2 bytes
+ addi a3, a3, -2
+ l16ui a6, a3, 0
+ addi a5, a5, -2
+ s16i a6, a5, 0
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback5:
+ # copy 1 byte
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+.Lbacksrcunaligned:
+ _beqz a4, .Lbackdone # avoid loading anything for zero-length copies
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ __ssa8 a3 # set shift amount from byte offset
+#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
+ * the lint or ferret client, or 0
+ * to save a few cycles */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ and a11, a3, a8 # save unalignment offset for below
+ sub a3, a3, a11 # align a3
+#endif
+ l32i a6, a3, 0 # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .backLoop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .backLoop2done
+ slli a10, a7, 4
+ sub a10, a3, a10 # a10 = start of first 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop2:
+ addi a3, a3, -16
+ l32i a7, a3, 12
+ l32i a8, a3, 8
+ addi a5, a5, -16
+ __src_b a6, a7, a6
+ s32i a6, a5, 12
+ l32i a9, a3, 4
+ __src_b a7, a8, a7
+ s32i a7, a5, 8
+ l32i a6, a3, 0
+ __src_b a8, a9, a8
+ s32i a8, a5, 4
+ __src_b a9, a6, a9
+ s32i a9, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop2done:
+ bbci.l a4, 3, .Lback12
+ # copy 8 bytes
+ addi a3, a3, -8
+ l32i a7, a3, 4
+ l32i a8, a3, 0
+ addi a5, a5, -8
+ __src_b a6, a7, a6
+ s32i a6, a5, 4
+ __src_b a7, a8, a7
+ s32i a7, a5, 0
+ mov a6, a8
+.Lback12:
+ bbci.l a4, 2, .Lback13
+ # copy 4 bytes
+ addi a3, a3, -4
+ l32i a7, a3, 0
+ addi a5, a5, -4
+ __src_b a6, a7, a6
+ s32i a6, a5, 0
+ mov a6, a7
+.Lback13:
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ add a3, a3, a11 # readjust a3 with correct misalignment
+#endif
+ bbsi.l a4, 1, .Lback14
+ bbsi.l a4, 0, .Lback15
+.Lbackdone:
+ retw
+.Lback14:
+ # copy 2 bytes
+ addi a3, a3, -2
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a5, a5, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ bbsi.l a4, 0, .Lback15
+ retw
+.Lback15:
+ # copy 1 byte
+ addi a3, a3, -1
+ addi a5, a5, -1
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
+ENDPROC(__memmove)
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
new file mode 100644
index 000000000..276747dec
--- /dev/null
+++ b/arch/xtensa/lib/memset.S
@@ -0,0 +1,153 @@
+/*
+ * arch/xtensa/lib/memset.S
+ *
+ * ANSI C standard library function memset
+ * (Well, almost. .fixup code might return zero.)
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+/*
+ * void *memset(void *dst, int c, size_t length)
+ *
+ * The algorithm is as follows:
+ * Create a word with c in all byte positions
+ * If the destination is aligned,
+ * do 16B chunks with a loop, and then finish up with
+ * 8B, 4B, 2B, and 1B stores conditional on the length.
+ * If destination is unaligned, align it by conditionally
+ * setting 1B and 2B and then go to aligned case.
+ * This code tries to use fall-through branches for the common
+ * case of an aligned destination (except for the branches to
+ * the alignment labels).
+ */
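+/*
+ * A C sketch of the byte-replication step below (illustrative only):
+ *
+ *	unsigned long w = c & 0xff;	// e.g. c == 0xcc
+ *	w |= w << 8;			// 0x000000cc -> 0x0000cccc
+ *	w |= w << 16;			// 0x0000cccc -> 0xcccccccc
+ *
+ * The resulting word can then be stored four bytes at a time with s32i.
+ */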
+
+.text
+ENTRY(__memset)
+WEAK(memset)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ c, a4/ length
+ extui a3, a3, 0, 8 # mask to just 8 bits
+ slli a7, a3, 8 # duplicate character in all bytes of word
+ or a3, a3, a7 # ...
+ slli a7, a3, 16 # ...
+ or a3, a3, a7 # ...
+ mov a5, a2 # copy dst so that a2 is return value
+ movi a6, 3 # for alignment tests
+ bany a2, a6, .Ldstunaligned # if dst is unaligned
+.L0: # return here from .Ldstunaligned when dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ bnez a4, .Laligned
+ retw
+
+/*
+ * Destination is word-aligned.
+ */
+ # set 16 bytes per iteration for word-aligned dst
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a6, a7, 4
+ add a6, a6, a5 # a6 = end of last 16B chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
+EX(10f) s32i a3, a5, 8
+EX(10f) s32i a3, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a5, a6, .Loop1
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # set 8 bytes
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
+ addi a5, a5, 8
+.L2:
+ bbci.l a4, 2, .L3
+ # set 4 bytes
+EX(10f) s32i a3, a5, 0
+ addi a5, a5, 4
+.L3:
+ bbci.l a4, 1, .L4
+ # set 2 bytes
+EX(10f) s16i a3, a5, 0
+ addi a5, a5, 2
+.L4:
+ bbci.l a4, 0, .L5
+ # set 1 byte
+EX(10f) s8i a3, a5, 0
+.L5:
+.Lret1:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+.Ldstunaligned:
+ bltui a4, 8, .Lbyteset # do short copies byte by byte
+ bbci.l a5, 0, .L20 # branch if dst is half-word aligned
+ # dst is only byte aligned
+ # set 1 byte
+EX(10f) s8i a3, a5, 0
+ addi a5, a5, 1
+ addi a4, a4, -1
+ # now retest if dst aligned
+ bbci.l a5, 1, .L0 # if now aligned, return to main algorithm
+.L20:
+ # dst half-aligned
+ # set 2 bytes
+EX(10f) s16i a3, a5, 0
+ addi a5, a5, 2
+ addi a4, a4, -2
+ j .L0 # dst is now aligned, return to main algorithm
+
+/*
+ * Byte by byte set
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbyteset:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytesetdone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytesetdone
+ add a6, a5, a4 # a6 = ending address
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbyteloop:
+EX(10f) s8i a3, a5, 0
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a5, a6, .Lbyteloop
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytesetdone:
+ retw
+
+ENDPROC(__memset)
+
+ .section .fixup, "ax"
+ .align 4
+
+/* We return zero if a failure occurred. */
+
+10:
+ movi a2, 0
+ retw
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
new file mode 100644
index 000000000..a2b558161
--- /dev/null
+++ b/arch/xtensa/lib/pci-auto.c
@@ -0,0 +1,320 @@
+/*
+ * arch/xtensa/lib/pci-auto.c
+ *
+ * PCI autoconfiguration library
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
+ *
+ * Based on work from Matt Porter <mporter@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/pci-bridge.h>
+
+
+/*
+ *
+ * Setting up a PCI controller:
+ *
+ * pci_ctrl->first_busno = <first bus number (0)>
+ * pci_ctrl->last_busno = <last bus number (0xff)>
+ * pci_ctrl->ops = <PCI config operations>
+ * pci_ctrl->map_irq = <function to return the interrupt number for a device>
+ *
+ * pci_ctrl->io_space.start = <IO space start address (PCI view)>
+ * pci_ctrl->io_space.end = <IO space end address (PCI view)>
+ * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
+ * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
+ * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
+ * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
+ *
+ * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
+ * <IO space end>, IORESOURCE_IO, "PCI host bridge");
+ * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
+ * <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
+ *
+ * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
+ *
+ * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
+ *
+ */
+
+static int pciauto_upper_iospc;
+static int pciauto_upper_memspc;
+
+static struct pci_dev pciauto_dev;
+static struct pci_bus pciauto_bus;
+
+/*
+ * Helper functions
+ */
+
+/* Initialize the bars of a PCI device. */
+
+static void __init
+pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
+{
+ int bar_size;
+ int bar, bar_nr;
+ int *upper_limit;
+ int found_mem64 = 0;
+
+ for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
+ bar <= bar_limit;
+ bar+=4, bar_nr++)
+ {
+ /* Tickle the BAR and get the size */
+ pci_write_config_dword(dev, bar, 0xffffffff);
+ pci_read_config_dword(dev, bar, &bar_size);
+
+ /* If BAR is not implemented go to the next BAR */
+ if (!bar_size)
+ continue;
+
+ /* Check the BAR type and set our address mask */
+ if (bar_size & PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ bar_size &= PCI_BASE_ADDRESS_IO_MASK;
+ upper_limit = &pciauto_upper_iospc;
+ pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
+ }
+ else
+ {
+ if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
+ PCI_BASE_ADDRESS_MEM_TYPE_64)
+ found_mem64 = 1;
+
+ bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
+ upper_limit = &pciauto_upper_memspc;
+ pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
+ }
+
+ /* Allocate a base address (bar_size is negative!) */
+ *upper_limit = (*upper_limit + bar_size) & bar_size;
+
+ /* Write it out and update our limit */
+ pci_write_config_dword(dev, bar, *upper_limit);
+
+ /*
+ * If we are a 64-bit decoder then increment to the
+ * upper 32 bits of the bar and force it to locate
+ * in the lower 4GB of memory.
+ */
+
+ if (found_mem64)
+ pci_write_config_dword(dev, (bar+=4), 0x00000000);
+
+ pr_debug("size=0x%x, address=0x%x\n",
+ ~bar_size + 1, *upper_limit);
+ }
+}
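+
+/*
+ * Worked example of the sizing math above (illustrative only): writing
+ * 0xffffffff to a memory BAR that decodes 4KB reads back 0xfffff000
+ * once the type bits are masked off. Then ~bar_size + 1 == 0x1000 is
+ * the decoded size, and (*upper_limit + bar_size) & bar_size rounds
+ * the allocator down to the next size-aligned base address.
+ */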
+
+/* Initialize the interrupt number. */
+
+static void __init
+pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
+{
+ u8 pin;
+ int irq = 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+
+ /* Fix illegal pin numbers. */
+
+ if (pin == 0 || pin > 4)
+ pin = 1;
+
+ if (pci_ctrl->map_irq)
+ irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);
+
+ if (irq == -1)
+ irq = 0;
+
+ pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
+
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+
+static void __init
+pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
+ int sub_bus, int *iosave, int *memsave)
+{
+ /* Configure bus number registers */
+ pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
+ pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);
+
+ /* Round memory allocator to 1MB boundary */
+ pciauto_upper_memspc &= ~(0x100000 - 1);
+ *memsave = pciauto_upper_memspc;
+
+ /* Round I/O allocator to 4KB boundary */
+ pciauto_upper_iospc &= ~(0x1000 - 1);
+ *iosave = pciauto_upper_iospc;
+
+ /* Set up memory and I/O filter limits, assume 32-bit I/O space */
+ pci_write_config_word(dev, PCI_MEMORY_LIMIT,
+ ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
+ pci_write_config_byte(dev, PCI_IO_LIMIT,
+ ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
+ pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+ ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
+}
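+
+/*
+ * The rounding above simply clears low bits: e.g. an allocator value
+ * of 0x12345678 becomes 0x12300000 after "&= ~(0x100000 - 1)" (1MB
+ * alignment) and 0x12345000 after "&= ~(0x1000 - 1)" (4KB alignment).
+ */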
+
+static void __init
+pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
+ int *iosave, int *memsave)
+{
+ int cmdstat;
+
+ /* Configure bus number registers */
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);
+
+ /*
+ * Round memory allocator to 1MB boundary.
+ * If no space used, allocate minimum.
+ */
+ pciauto_upper_memspc &= ~(0x100000 - 1);
+ if (*memsave == pciauto_upper_memspc)
+ pciauto_upper_memspc -= 0x00100000;
+
+ pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);
+
+ /* Allocate 1MB for prefetch */
+ pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
+ ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
+
+ pciauto_upper_memspc -= 0x100000;
+
+ pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
+ pciauto_upper_memspc >> 16);
+
+ /* Round I/O allocator to 4KB boundary */
+ pciauto_upper_iospc &= ~(0x1000 - 1);
+ if (*iosave == pciauto_upper_iospc)
+ pciauto_upper_iospc -= 0x1000;
+
+ pci_write_config_byte(dev, PCI_IO_BASE,
+ (pciauto_upper_iospc & 0x0000f000) >> 8);
+ pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+ pciauto_upper_iospc >> 16);
+
+ /* Enable memory and I/O accesses, enable bus master */
+ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
+ pci_write_config_dword(dev, PCI_COMMAND,
+ cmdstat |
+ PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+}
+
+/*
+ * Scan the current PCI bus.
+ */
+
+
+int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
+{
+ int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
+ unsigned short vid;
+ unsigned char header_type;
+ struct pci_dev *dev = &pciauto_dev;
+
+ pciauto_dev.bus = &pciauto_bus;
+ pciauto_dev.sysdata = pci_ctrl;
+ pciauto_bus.ops = pci_ctrl->ops;
+
+ /*
+ * Fetch our I/O and memory space upper boundaries used
+ * to allocate base addresses on this pci_controller.
+ */
+
+ if (current_bus == pci_ctrl->first_busno)
+ {
+ pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
+ pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
+ }
+
+ sub_bus = current_bus;
+
+ for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
+ {
+ /* Skip our host bridge */
+ if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
+ continue;
+
+ if (PCI_FUNC(pci_devfn) && !found_multi)
+ continue;
+
+ pciauto_bus.number = current_bus;
+ pciauto_dev.devfn = pci_devfn;
+
+ /* If config space read fails from this device, move on */
+ if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
+ continue;
+
+ if (!PCI_FUNC(pci_devfn))
+ found_multi = header_type & 0x80;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vid);
+
+ if (vid == 0xffff || vid == 0x0000) {
+ found_multi = 0;
+ continue;
+ }
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);
+
+ if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {
+
+ int iosave, memsave;
+
+ pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n",
+ PCI_SLOT(pci_devfn));
+
+ /* Allocate PCI I/O and/or memory space */
+ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
+
+ pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
+ &iosave, &memsave);
+ sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1);
+ pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
+ &iosave, &memsave);
+ pciauto_bus.number = current_bus;
+
+ continue;
+
+ }
+
+ /*
+ * Found a peripheral, enable some standard
+ * settings
+ */
+
+ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
+ pci_write_config_dword(dev, PCI_COMMAND,
+ cmdstat |
+ PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
+
+ /* Allocate PCI I/O and/or memory space */
+ pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
+ current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));
+
+ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
+ pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
+ }
+ return sub_bus;
+}
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
new file mode 100644
index 000000000..5fce16b67
--- /dev/null
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -0,0 +1,217 @@
+/*
+ * arch/xtensa/lib/strncpy_user.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Returns: -EFAULT if exception before terminator, N if the entire
+ * buffer filled, else strlen.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+/*
+ * char *__strncpy_user(char *dst, const char *src, size_t len)
+ */
+
+#ifdef __XTENSA_EB__
+# define MASK0 0xff000000
+# define MASK1 0x00ff0000
+# define MASK2 0x0000ff00
+# define MASK3 0x000000ff
+#else
+# define MASK0 0x000000ff
+# define MASK1 0x0000ff00
+# define MASK2 0x00ff0000
+# define MASK3 0xff000000
+#endif
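+
+/*
+ * The masks above let a single bnone test one byte lane of a loaded
+ * word. In C terms (illustrative sketch only):
+ *
+ *	if ((word & MASK0) == 0)
+ *		// the NUL terminator is in byte 0 of this word
+ *
+ * The .Lz0-.Lz3 labels below handle a terminator found in each of the
+ * four byte lanes.
+ */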
+
+# Register use
+# a0/ return address
+# a1/ stack pointer
+# a2/ return value
+# a3/ src
+# a4/ len
+# a5/ mask0
+# a6/ mask1
+# a7/ mask2
+# a8/ mask3
+# a9/ tmp
+# a10/ tmp
+# a11/ dst
+# a12/ tmp
+
+.text
+ENTRY(__strncpy_user)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a11, a2 # leave dst in return value register
+ beqz a4, .Lret # if len is zero
+ movi a5, MASK0 # mask for byte 0
+ movi a6, MASK1 # mask for byte 1
+ movi a7, MASK2 # mask for byte 2
+ movi a8, MASK3 # mask for byte 3
+ bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned
+ bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned
+.Lsrcaligned: # return here when src is word-aligned
+ srli a12, a4, 2 # number of loop iterations with 4B per loop
+ movi a9, 3
+ bnone a11, a9, .Laligned
+ j .Ldstunaligned
+
+.Lsrc1mod2: # src address is odd
+EX(11f) l8ui a9, a3, 0 # get byte 0
+ addi a3, a3, 1 # advance src pointer
+EX(10f) s8i a9, a11, 0 # store byte 0
+ beqz a9, .Lret # if byte 0 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ beqz a4, .Lret # if len is zero
+ bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
+
+.Lsrc2mod4: # src address is 2 mod 4
+EX(11f) l8ui a9, a3, 0 # get byte 0
+ /* 1-cycle interlock */
+EX(10f) s8i a9, a11, 0 # store byte 0
+ beqz a9, .Lret # if byte 0 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ beqz a4, .Lret # if len is zero
+EX(11f) l8ui a9, a3, 1 # get byte 1
+ addi a3, a3, 2 # advance src pointer
+EX(10f) s8i a9, a11, 0 # store byte 1
+ beqz a9, .Lret # if byte 1 is zero
+ addi a11, a11, 1 # advance dst pointer
+ addi a4, a4, -1 # decrement len
+ bnez a4, .Lsrcaligned # if len is nonzero
+.Lret:
+ sub a2, a11, a2 # compute strlen
+ retw
+
+/*
+ * dst is word-aligned, src is word-aligned
+ */
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a12, .Loop1done
+#else
+ beqz a12, .Loop1done
+ slli a12, a12, 2
+ add a12, a12, a11 # a12 = end of last 4B chunk
+#endif
+.Loop1:
+EX(11f) l32i a9, a3, 0 # get word from src
+ addi a3, a3, 4 # advance src pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+ bnone a9, a7, .Lz2 # if byte 2 is zero
+EX(10f) s32i a9, a11, 0 # store word to dst
+ bnone a9, a8, .Lz3 # if byte 3 is zero
+ addi a11, a11, 4 # advance dst pointer
+#if !XCHAL_HAVE_LOOPS
+ blt a11, a12, .Loop1
+#endif
+
+.Loop1done:
+ bbci.l a4, 1, .L100
+ # copy 2 bytes
+EX(11f) l16ui a9, a3, 0
+ addi a3, a3, 2 # advance src pointer
+#ifdef __XTENSA_EB__
+ bnone a9, a7, .Lz0 # if byte 2 is zero
+ bnone a9, a8, .Lz1 # if byte 3 is zero
+#else
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+#endif
+EX(10f) s16i a9, a11, 0
+ addi a11, a11, 2 # advance dst pointer
+.L100:
+ bbci.l a4, 0, .Lret
+EX(11f) l8ui a9, a3, 0
+ /* slot */
+EX(10f) s8i a9, a11, 0
+ beqz a9, .Lret # if byte is zero
+ addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
+ # the effect of adding 3 in .Lz3 code
+ /* fall thru to .Lz3 and "retw" */
+
+.Lz3: # byte 3 is zero
+ addi a11, a11, 3 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz0: # byte 0 is zero
+#ifdef __XTENSA_EB__
+ movi a9, 0
+#endif /* __XTENSA_EB__ */
+EX(10f) s8i a9, a11, 0
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz1: # byte 1 is zero
+#ifdef __XTENSA_EB__
+ extui a9, a9, 16, 16
+#endif /* __XTENSA_EB__ */
+EX(10f) s16i a9, a11, 0
+ addi a11, a11, 1 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+.Lz2: # byte 2 is zero
+#ifdef __XTENSA_EB__
+ extui a9, a9, 16, 16
+#endif /* __XTENSA_EB__ */
+EX(10f) s16i a9, a11, 0
+ movi a9, 0
+EX(10f) s8i a9, a11, 2
+ addi a11, a11, 2 # advance dst pointer
+ sub a2, a11, a2 # compute strlen
+ retw
+
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Ldstunaligned:
+/*
+ * for now just use byte copy loop
+ */
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lunalignedend
+#else
+ beqz a4, .Lunalignedend
+ add a12, a11, a4 # a12 = ending address
+#endif /* XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+EX(11f) l8ui a9, a3, 0
+ addi a3, a3, 1
+EX(10f) s8i a9, a11, 0
+ beqz a9, .Lunalignedend
+ addi a11, a11, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a11, a12, .Lnextbyte
+#endif
+
+.Lunalignedend:
+ sub a2, a11, a2 # compute strlen
+ retw
+
+ENDPROC(__strncpy_user)
+
+ .section .fixup, "ax"
+ .align 4
+
+ /* For now, just return -EFAULT. Future implementations might
+ * like to clear remaining kernel space, like the fixup
+ * implementation in memset(). Thus, we differentiate between
+ * load/store fixups. */
+
+10:
+11:
+ movi a2, -EFAULT
+ retw
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
new file mode 100644
index 000000000..0b956ce7f
--- /dev/null
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -0,0 +1,141 @@
+/*
+ * arch/xtensa/lib/strnlen_user.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Returns strnlen, including trailing zero terminator.
+ * Zero indicates error.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+/*
+ * size_t __strnlen_user(const char *s, size_t len)
+ */
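+
+/*
+ * Usage sketch (illustrative only): for the user string "abc" and
+ * len >= 4, __strnlen_user(s, len) returns 4 -- three characters plus
+ * the terminating NUL. A faulting access returns 0 via the fixup at
+ * the end of this file.
+ */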
+
+#ifdef __XTENSA_EB__
+# define MASK0 0xff000000
+# define MASK1 0x00ff0000
+# define MASK2 0x0000ff00
+# define MASK3 0x000000ff
+#else
+# define MASK0 0x000000ff
+# define MASK1 0x0000ff00
+# define MASK2 0x00ff0000
+# define MASK3 0xff000000
+#endif
+
+# Register use:
+# a2/ src
+# a3/ len
+# a4/ tmp
+# a5/ mask0
+# a6/ mask1
+# a7/ mask2
+# a8/ mask3
+# a9/ tmp
+# a10/ tmp
+
+.text
+ENTRY(__strnlen_user)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ s, a3/ len
+ addi a4, a2, -4 # because we overincrement at the end;
+ # we compensate with load offsets of 4
+ movi a5, MASK0 # mask for byte 0
+ movi a6, MASK1 # mask for byte 1
+ movi a7, MASK2 # mask for byte 2
+ movi a8, MASK3 # mask for byte 3
+ bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned
+ bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned
+
+/*
+ * String is word-aligned.
+ */
+.Laligned:
+ srli a10, a3, 2 # number of loop iterations with 4B per loop
+#if XCHAL_HAVE_LOOPS
+ loopnez a10, .Ldone
+#else
+ beqz a10, .Ldone
+ slli a10, a10, 2
+ add a10, a10, a4 # a10 = end of last 4B chunk
+#endif /* XCHAL_HAVE_LOOPS */
+.Loop:
+EX(10f) l32i a9, a4, 4 # get next word of string
+ addi a4, a4, 4 # advance string pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+ bnone a9, a7, .Lz2 # if byte 2 is zero
+ bnone a9, a8, .Lz3 # if byte 3 is zero
+#if !XCHAL_HAVE_LOOPS
+ blt a4, a10, .Loop
+#endif
+
+.Ldone:
+EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
+
+ bbci.l a3, 1, .L100
+ # check two more bytes (bytes 0, 1 of word)
+ addi a4, a4, 2 # advance string pointer
+ bnone a9, a5, .Lz0 # if byte 0 is zero
+ bnone a9, a6, .Lz1 # if byte 1 is zero
+.L100:
+ bbci.l a3, 0, .L101
+ # check one more byte (byte 2 of word)
+ # Actually, we don't need to check. Zero or nonzero, we'll add one.
+ # Do not add an extra one for the NULL terminator since we have
+ # exhausted the original len parameter.
+ addi a4, a4, 1 # advance string pointer
+.L101:
+ sub a2, a4, a2 # compute length
+ retw
+
+# NOTE that in several places below, we point to the byte just after
+# the zero byte in order to include the NULL terminator in the count.
+
+.Lz3: # byte 3 is zero
+ addi a4, a4, 3 # point to zero byte
+.Lz0: # byte 0 is zero
+ addi a4, a4, 1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+.Lz1: # byte 1 is zero
+ addi a4, a4, 1+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+.Lz2: # byte 2 is zero
+ addi a4, a4, 2+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+
+.L1mod2: # address is odd
+EX(10f) l8ui a9, a4, 4 # get byte 0
+ addi a4, a4, 1 # advance string pointer
+ beqz a9, .Lz3 # if byte 0 is zero
+ bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
+
+.L2mod4: # address is 2 mod 4
+ addi a4, a4, 2 # advance ptr for aligned access
+EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
+ bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
+ bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
+ # byte 3 is zero
+ addi a4, a4, 3+1 # point just beyond zero byte
+ sub a2, a4, a2 # subtract to get length
+ retw
+
+ENDPROC(__strnlen_user)
+
+ .section .fixup, "ax"
+ .align 4
+10:
+ movi a2, 0
+ retw
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
new file mode 100644
index 000000000..64ab19713
--- /dev/null
+++ b/arch/xtensa/lib/usercopy.S
@@ -0,0 +1,284 @@
+/*
+ * arch/xtensa/lib/usercopy.S
+ *
+ * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
+ *
+ * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
+ * It needs to remain separate and distinct. The hal files are part
+ * of the Xtensa link-time HAL, and those files may differ per
+ * processor configuration. Patching the kernel for another
+ * processor configuration includes replacing the hal files, and we
+ * could lose the special functionality for accessing user-space
+ * memory during such a patch. We sacrifice a little code space here
+ * in favor of simpler code maintenance.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*
+ * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
+ *
+ * The returned value is the number of bytes not copied, so a return
+ * value of zero means success.
+ *
+ * The general case algorithm is as follows:
+ * If the destination and source are both aligned,
+ * do 16B chunks with a loop, and then finish up with
+ * 8B, 4B, 2B, and 1B copies conditional on the length.
+ * If destination is aligned and source unaligned,
+ * do the same, but use SRC to align the source data.
+ * If destination is unaligned, align it by conditionally
+ * copying 1B and 2B and then retest.
+ * This code tries to use fall-through branches for the common
+ * case of aligned destinations (except for the branches to
+ * the alignment label).
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ original length
+ */
+
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
+
+ .text
+ENTRY(__xtensa_copy_user)
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+ mov a11, a4 # preserve original len for error case
+.Lcommon:
+ bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
+ bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
+.Ldstaligned: # return here from .Ldstunaligned when dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is also aligned,
+ bnone a3, a8, .Laligned # then use word copy
+ __ssa8 a3 # set shift amount from byte offset
+ bnez a4, .Lsrcunaligned
+ movi a2, 0 # return success for len==0
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+.Ldst1mod2: # dst is only byte aligned
+ bltui a4, 7, .Lbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+EX(10f) l8ui a6, a3, 0
+ addi a3, a3, 1
+EX(10f) s8i a6, a5, 0
+ addi a5, a5, 1
+ addi a4, a4, -1
+ bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Ldst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ bltui a4, 6, .Lbytecopy # do short copies byte by byte
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
+ addi a3, a3, 2
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
+ addi a5, a5, 2
+ addi a4, a4, -2
+ j .Ldstaligned # dst is now aligned, return to main algorithm
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbytecopydone
+ add a7, a3, a4 # a7 = end address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lnextbyte:
+EX(10f) l8ui a6, a3, 0
+ addi a3, a3, 1
+EX(10f) s8i a6, a5, 0
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a7, .Lnextbyte
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbytecopydone:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+/*
+ * Destination and source are word-aligned.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+ .align 4 # 1 mod 4 alignment for LOOPNEZ
+ .byte 0 # (0 mod 4 alignment for LBEG)
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop1done
+ slli a8, a7, 4
+ add a8, a8, a3 # a8 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1:
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a6, a3, 8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a7, a3, 12
+EX(10f) s32i a6, a5, 8
+ addi a3, a3, 16
+EX(10f) s32i a7, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a8, .Loop1
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop1done:
+ bbci.l a4, 3, .L2
+ # copy 8 bytes
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
+ addi a3, a3, 8
+EX(10f) s32i a6, a5, 0
+EX(10f) s32i a7, a5, 4
+ addi a5, a5, 8
+.L2:
+ bbci.l a4, 2, .L3
+ # copy 4 bytes
+EX(10f) l32i a6, a3, 0
+ addi a3, a3, 4
+EX(10f) s32i a6, a5, 0
+ addi a5, a5, 4
+.L3:
+ bbci.l a4, 1, .L4
+ # copy 2 bytes
+EX(10f) l16ui a6, a3, 0
+ addi a3, a3, 2
+EX(10f) s16i a6, a5, 0
+ addi a5, a5, 2
+.L4:
+ bbci.l a4, 0, .L5
+ # copy 1 byte
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
+.L5:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lsrcunaligned:
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ and a10, a3, a8 # save unalignment offset for below
+ sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
+EX(10f) l32i a6, a3, 0 # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .Loop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .Loop2done
+ slli a12, a7, 4
+ add a12, a12, a3 # a12 = end of last 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2:
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a9, a3, 12
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a6, a3, 16
+ __src_b a8, a8, a9
+EX(10f) s32i a8, a5, 8
+ addi a3, a3, 16
+ __src_b a9, a9, a6
+EX(10f) s32i a9, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ blt a3, a12, .Loop2
+#endif /* !XCHAL_HAVE_LOOPS */
+.Loop2done:
+ bbci.l a4, 3, .L12
+ # copy 8 bytes
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
+ addi a3, a3, 8
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
+ addi a5, a5, 8
+ mov a6, a8
+.L12:
+ bbci.l a4, 2, .L13
+ # copy 4 bytes
+EX(10f) l32i a7, a3, 4
+ addi a3, a3, 4
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
+ addi a5, a5, 4
+ mov a6, a7
+.L13:
+ add a3, a3, a10 # readjust a3 with correct misalignment
+ bbci.l a4, 1, .L14
+ # copy 2 bytes
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
+ addi a3, a3, 2
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
+ addi a5, a5, 2
+.L14:
+ bbci.l a4, 0, .L15
+ # copy 1 byte
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
+.L15:
+ movi a2, 0 # return success for len bytes copied
+ retw
+
+ENDPROC(__xtensa_copy_user)
+
+ .section .fixup, "ax"
+ .align 4
+
+/* a2 = original dst; a5 = current dst; a11= original len
+ * bytes_copied = a5 - a2
+ * retval = bytes_not_copied = original len - bytes_copied
+ * retval = a11 - (a5 - a2)
+ */
+
+
+10:
+ sub a2, a5, a2 /* a2 <-- bytes copied */
+ sub a2, a11, a2 /* a2 <-- bytes not copied */
+ retw
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 000000000..734888a00
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Linux/Xtensa-specific parts of the memory manager.
+#
+
+obj-y := init.o misc.o
+obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 000000000..d1ebe67c6
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,328 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we have to always flush the cache when pages are
+ * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
+ * page.
+ */
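+
+/*
+ * In code terms (illustrative only), a cleared PG_arch_1 means "the
+ * caches for this page may be out of sync", so the I$/D$ paths below
+ * follow the pattern
+ *
+ *	if (!test_bit(PG_arch_1, &page->flags)) {
+ *		// flush/invalidate the relevant caches
+ *		set_bit(PG_arch_1, &page->flags);
+ *	}
+ *
+ * as can be seen in the non-aliasing branch of update_mmu_cache().
+ */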
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+ unsigned long vaddr)
+{
+ if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ unsigned long kvaddr;
+
+ if (!PageHighMem(page)) {
+ kvaddr = (unsigned long)page_to_virt(page);
+
+ __invalidate_dcache_page(kvaddr);
+ } else {
+ kvaddr = TLBTEMP_BASE_1 +
+ (page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __invalidate_dcache_page_alias(kvaddr,
+ page_to_phys(page));
+ preempt_enable();
+ }
+ }
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+ unsigned long vaddr, unsigned long *paddr)
+{
+ if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ *paddr = page_to_phys(page);
+ return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+ } else {
+ *paddr = 0;
+ return page_to_virt(page);
+ }
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ unsigned long paddr;
+ void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+ preempt_disable();
+ kmap_invalidate_coherent(page, vaddr);
+ set_bit(PG_arch_1, &page->flags);
+ clear_page_alias(kvaddr, paddr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void copy_user_highpage(struct page *dst, struct page *src,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long dst_paddr, src_paddr;
+ void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+ &dst_paddr);
+ void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ &src_paddr);
+
+ preempt_disable();
+ kmap_invalidate_coherent(dst, vaddr);
+ set_bit(PG_arch_1, &dst->flags);
+ copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+/*
+ * Any time the kernel writes to a user page cache page, or it is about to
+ * read from a page cache page this routine is called.
+ *
+ */
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping_file(page);
+
+ /*
+ * If we have a mapping but the page is not mapped to user-space
+ * yet, we simply mark this page dirty and defer flushing the
+ * caches until update_mmu().
+ */
+
+ if (mapping && !mapping_mapped(mapping)) {
+ if (!test_bit(PG_arch_1, &page->flags))
+ set_bit(PG_arch_1, &page->flags);
+ return;
+
+ } else {
+
+ unsigned long phys = page_to_phys(page);
+ unsigned long temp = page->index << PAGE_SHIFT;
+ unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+ unsigned long virt;
+
+ /*
+ * Flush the page in kernel space and user space.
+ * Note that we can omit that step if aliasing is not
+ * an issue, but we do have to synchronize I$ and D$
+ * if we have a mapping.
+ */
+
+ if (!alias && !mapping)
+ return;
+
+ preempt_disable();
+ virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+ virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+ if (alias)
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+ if (mapping)
+ __invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
+ }
+
+ /* There shouldn't be an entry in the cache for this page anymore. */
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __flush_invalidate_dcache_all();
+ __invalidate_icache_all();
+}
+EXPORT_SYMBOL(local_flush_cache_range);
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned long pfn)
+{
+ /* Note that we have to use the 'alias' address to avoid multi-hit */
+
+ unsigned long phys = page_to_phys(pfn_to_page(pfn));
+ unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_cache_page);
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+
+ /* Invalidate old entry in TLBs */
+
+ flush_tlb_page(vma, addr);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+ if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+ unsigned long phys = page_to_phys(page);
+ unsigned long tmp;
+
+ preempt_disable();
+ tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
+ preempt_enable();
+
+ clear_bit(PG_arch_1, &page->flags);
+ }
+#else
+ if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+ && (vma->vm_flags & VM_EXEC) != 0) {
+ unsigned long paddr = (unsigned long)kmap_atomic(page);
+ __flush_dcache_page(paddr);
+ __invalidate_icache_page(paddr);
+ set_bit(PG_arch_1, &page->flags);
+ kunmap_atomic((void *)paddr);
+ }
+#endif
+}
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /* Flush and invalidate user page if aliased. */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
+ }
+
+ /* Copy data */
+
+ memcpy(dst, src, len);
+
+ /*
+ * Flush and invalidate kernel page if aliased and synchronize
+ * data and instruction caches for executable pages.
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+ preempt_disable();
+ __flush_invalidate_dcache_range((unsigned long) dst, len);
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
+ preempt_enable();
+
+ } else if ((vma->vm_flags & VM_EXEC) != 0) {
+ __flush_dcache_range((unsigned long)dst, len);
+ __invalidate_icache_range((unsigned long) dst, len);
+ }
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /*
+ * Flush user page if aliased.
+ * (Note: a simple flush would be sufficient.)
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
+ __flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
+ }
+
+ memcpy(dst, src, len);
+}
+
+#endif
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 000000000..2ab0e0dcd
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,255 @@
+// TODO VM_EXEC flag work-around, cache aliasing
+/*
+ * arch/xtensa/mm/fault.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2010 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/extable.h>
+#include <linux/hardirq.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/hardirq.h>
+#include <asm/pgalloc.h>
+
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
+void bad_page_fault(struct pt_regs*, unsigned long, int);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * Note: does not handle Miss and MultiHit.
+ */
+
+void do_page_fault(struct pt_regs *regs)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+ unsigned int exccause = regs->exccause;
+ unsigned int address = regs->excvaddr;
+ int code;
+
+ int is_write, is_exec;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ code = SEGV_MAPERR;
+
+ /* We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ */
+ if (address >= TASK_SIZE && !user_mode(regs))
+ goto vmalloc_fault;
+
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+
+ is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
+ exccause == EXCCAUSE_ITLB_MISS ||
+ exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+
+ pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+ current->comm, current->pid,
+ address, exccause, regs->pc,
+ is_write ? "w" : "", is_exec ? "x" : "");
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /* Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ code = SEGV_ACCERR;
+
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ } else if (is_exec) {
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ } else /* Allow read even from write-only pages. */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+ goto bad_area;
+
+ /* If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ if (fault & VM_FAULT_MAJOR)
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+ else
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+ return;
+
+ /* Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+ if (user_mode(regs)) {
+ current->thread.bad_vaddr = address;
+ current->thread.error_code = is_write;
+ force_sig_fault(SIGSEGV, code, (void *) address, current);
+ return;
+ }
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+
+
+ /* We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGKILL);
+ else
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ current->thread.bad_vaddr = address;
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address, current);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGBUS);
+ return;
+
+vmalloc_fault:
+ {
+ /* Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ struct mm_struct *act_mm = current->active_mm;
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ if (act_mm == NULL)
+ goto bad_page_fault;
+
+ pgd = act_mm->pgd + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto bad_page_fault;
+
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+
+ pmd = pmd_offset(pgd, address);
+ pmd_k = pmd_offset(pgd_k, address);
+ if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_page_fault;
+
+ pmd_val(*pmd) = pmd_val(*pmd_k);
+ pte_k = pte_offset_kernel(pmd_k, address);
+
+ if (!pte_present(*pte_k))
+ goto bad_page_fault;
+ return;
+ }
+bad_page_fault:
+ bad_page_fault(regs, address, SIGKILL);
+ return;
+}
+
+
+void
+bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+ extern void die(const char*, struct pt_regs*, long);
+ const struct exception_table_entry *entry;
+
+ /* Are we prepared to handle this kernel fault? */
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
+ current->thread.bad_uaddr = address;
+ regs->pc = entry->fixup;
+ return;
+ }
+
+ /* Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
+ die("Oops", regs, sig);
+ do_exit(sig);
+}
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 000000000..184ceadcc
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,95 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
+wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
+
+static void __init kmap_waitqueues_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
+ init_waitqueue_head(pkmap_map_wait_arr + i);
+}
+#else
+static inline void kmap_waitqueues_init(void)
+{
+}
+#endif
+
+static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
+{
+ return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
+ color;
+}
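+
+/*
+ * Worked example (illustrative; the constants are configuration
+ * dependent): with DCACHE_N_COLORS == 4, atomic kmap type 2 on CPU 1
+ * with a page of color 3 maps to index (2 + KM_TYPE_NR * 1) * 4 + 3.
+ * Each (cpu, type) pair thus owns one fixmap slot per cache color, so
+ * the chosen virtual address always has the same color as the page
+ * being mapped.
+ */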
+
+void *kmap_atomic(struct page *page)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ idx = kmap_idx(kmap_atomic_idx_push(),
+ DCACHE_ALIAS(page_to_phys(page)));
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte + idx)));
+#endif
+ set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ if (kvaddr >= (void *)FIXADDR_START &&
+ kvaddr < (void *)FIXADDR_TOP) {
+ int idx = kmap_idx(kmap_atomic_idx(),
+ DCACHE_ALIAS((unsigned long)kvaddr));
+
+ /*
+ * Force other mappings to Oops if they try to access this
+ * pte without first remapping it. Keeping stale mappings
+ * around is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
+ pte_clear(&init_mm, kvaddr, kmap_pte + idx);
+ local_flush_tlb_kernel_range((unsigned long)kvaddr,
+ (unsigned long)kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_waitqueues_init();
+}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000..34aead7dc
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,269 @@
+/*
+ * arch/xtensa/mm/init.c
+ *
+ * Derived from MIPS, PPC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Marc Gauthier
+ * Kevin Chea
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+#include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
+
+#include <asm/bootparam.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/sysmem.h>
+
+/*
+ * Initialize the bootmem system and give it all low memory we have available.
+ */
+
+void __init bootmem_init(void)
+{
+ /* Reserve all memory below PHYS_OFFSET, as memory
+ * accounting doesn't work for pages below that address.
+ *
+ * If PHYS_OFFSET is zero, reserve the page at address 0:
+ * successful allocations should never return NULL.
+ */
+ if (PHYS_OFFSET)
+ memblock_reserve(0, PHYS_OFFSET);
+ else
+ memblock_reserve(0, 1);
+
+ early_init_fdt_scan_reserved_mem();
+
+ if (!memblock_phys_mem_size())
+ panic("No memory found!\n");
+
+ min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+ min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ max_low_pfn = min(max_pfn, MAX_LOW_PFN);
+
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ memblock_dump_all();
+}
+
+
+void __init zones_init(void)
+{
+ /* All pages are DMA-able, so we put them all in the DMA zone. */
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
+#ifdef CONFIG_HIGHMEM
+ [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
+#endif
+ };
+ free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
+}
+
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+ unsigned long max_low = max_low_pfn;
+ struct memblock_region *mem, *res;
+
+ reset_all_zones_managed_pages();
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ if (memblock_is_nomap(mem))
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ free_area_high(start, res_start);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ free_area_high(start, end);
+ }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
+/*
+ * Initialize memory pages.
+ */
+
+void __init mem_init(void)
+{
+ free_highpages();
+
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
+ free_all_bootmem();
+
+ mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+ KASAN_SHADOW_SIZE >> 20,
+#endif
+#ifdef CONFIG_MMU
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+#else
+ min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
+#endif
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+ (unsigned long)_text, (unsigned long)_etext,
+ (unsigned long)(_etext - _text) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)_sdata,
+ (unsigned long)(_sdata - __start_rodata) >> 10,
+ (unsigned long)_sdata, (unsigned long)_edata,
+ (unsigned long)(_edata - _sdata) >> 10,
+ (unsigned long)__init_begin, (unsigned long)__init_end,
+ (unsigned long)(__init_end - __init_begin) >> 10,
+ (unsigned long)__bss_start, (unsigned long)__bss_stop,
+ (unsigned long)(__bss_stop - __bss_start) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern int initrd_is_mapped;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (initrd_is_mapped)
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void free_initmem(void)
+{
+ free_initmem_default(-1);
+}
+
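+/*
+ * Each comma-separated "memmap=" chunk takes one of three forms:
+ *   memmap=<size>@<addr>   add a memory region
+ *   memmap=<size>$<addr>   reserve a memory region
+ *   memmap=<size>          reserve all memory from <size> upwards
+ */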
+static void __init parse_memmap_one(char *p)
+{
+ char *oldp;
+ unsigned long start_at, mem_size;
+
+ if (!p)
+ return;
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
+
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ memblock_add(start_at, mem_size);
+ break;
+
+ case '$':
+ start_at = memparse(p + 1, &p);
+ memblock_reserve(start_at, mem_size);
+ break;
+
+ case 0:
+ memblock_reserve(mem_size, -mem_size);
+ break;
+
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
+ }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+
+ if (k)
+ *k++ = 0;
+
+ parse_memmap_one(str);
+ str = k;
+ }
+
+ return 0;
+}
+early_param("memmap", parse_memmap_opt);
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
new file mode 100644
index 000000000..d89c3c5fd
--- /dev/null
+++ b/arch/xtensa/mm/ioremap.c
@@ -0,0 +1,68 @@
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
+ pgprot_t prot)
+{
+ unsigned long offset = paddr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(paddr);
+ struct vm_struct *area;
+ unsigned long vaddr;
+ int err;
+
+ paddr &= PAGE_MASK;
+
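+ /* Remapping normal RAM through ioremap is unexpected. */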
+ WARN_ON(pfn_valid(pfn));
+
+ size = PAGE_ALIGN(offset + size);
+
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ vaddr = (unsigned long)area->addr;
+ area->phys_addr = paddr;
+
+ err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+
+ if (err) {
+ vunmap((void *)vaddr);
+ return NULL;
+ }
+
+ flush_cache_vmap(vaddr, vaddr + size);
+ return (void __iomem *)(offset + vaddr);
+}
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
+{
+ return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(xtensa_ioremap_nocache);
+
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+{
+ return xtensa_ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(xtensa_ioremap_cache);
+
+void xtensa_iounmap(volatile void __iomem *io_addr)
+{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(xtensa_iounmap);
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000..6b532b6bd
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+ unsigned long vaddr = KASAN_SHADOW_START;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+ for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+ }
+ early_trap_init();
+}
+
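+/*
+ * Back the shadow region [start, end) with real memory: one contiguous
+ * pte array covers the whole range, each pte gets a freshly allocated
+ * page, and every pmd entry is pointed at its slice of the array before
+ * the new shadow is zeroed.
+ */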
+static void __init populate(void *start, void *end)
+{
+ unsigned long n_pages = (end - start) / PAGE_SIZE;
+ unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+ unsigned long i, j;
+ unsigned long vaddr = (unsigned long)start;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ pr_debug("%s: %p - %p\n", __func__, start, end);
+
+ for (i = j = 0; i < n_pmds; ++i) {
+ int k;
+
+ for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+ phys_addr_t phys =
+ memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+ MEMBLOCK_ALLOC_ANYWHERE);
+
+ set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+ }
+
+ for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+ set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ int i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+ (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+ BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+ /*
+ * Replace shadow map pages that cover addresses from VMALLOC area
+ * start to the end of KSEG with clean writable pages.
+ */
+ populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+ /* Write protect kasan_zero_page and zero-initialize it again. */
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+ local_flush_tlb_all();
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ /* At this point kasan is fully initialized. Enable error messages. */
+ current->kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 000000000..11a01c3e9
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,484 @@
+/*
+ * arch/xtensa/mm/misc.S
+ *
+ * Miscellaneous assembly functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
+
+
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ * a2
+ */
+
+ENTRY(clear_page)
+
+ entry a1, 16
+
+ movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ __endla a2, a7, 32
+
+ retw
+
+ENDPROC(clear_page)
+
+/*
+ * copy_page and copy_user_page are the same for non-cache-aliased configs.
+ *
+ * copy_page (void *to, void *from)
+ * a2 a3
+ */
+
+ENTRY(copy_page)
+
+ entry a1, 16
+
+ __loopi a2, a4, PAGE_SIZE, 32
+
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
+
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
+
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
+
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
+
+ addi a2, a2, 32
+ addi a3, a3, 32
+
+ __endl a2, a4
+
+ retw
+
+ENDPROC(copy_page)
+
+#ifdef CONFIG_MMU
+/*
+ * If we have to deal with cache aliasing, we use temporary memory mappings
+ * to ensure that the source and destination pages have the same color as
+ * the virtual address. We use ways 0 and 1 for temporary mappings in such cases.
+ *
+ * The temporary DTLB entries shouldn't be flushed by interrupts, but are
+ * flushed by preemptive task switches. Special code in the
+ * fast_second_level_miss handler re-establishes the temporary mapping.
+ * It requires that the PPNs for the destination and source addresses are
+ * in a6, and a7, respectively.
+ */
+
+/* TLB miss exceptions are treated specially in the following region */
+
+ENTRY(__tlbtemp_mapping_start)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * clear_page_alias(void *addr, unsigned long paddr)
+ * a2 a3
+ */
+
+ENTRY(clear_page_alias)
+
+ entry a1, 32
+
+ /* Skip setting up a temporary DTLB if not aliased low page. */
+
+ movi a5, PAGE_OFFSET
+ movi a6, 0
+ beqz a3, 1f
+
+ /* Setup a temporary DTLB for the addr. */
+
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+1: movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ __endla a2, a7, 32
+
+ bnez a6, 1f
+ retw
+
+ /* We need to invalidate the temporary idtlb entry, if any. */
+
+1: idtlb a4
+ dsync
+
+ retw
+
+ENDPROC(clear_page_alias)
+
+/*
+ * copy_page_alias(void *to, void *from,
+ * a2 a3
+ * unsigned long to_paddr, unsigned long from_paddr)
+ * a4 a5
+ */
+
+ENTRY(copy_page_alias)
+
+ entry a1, 32
+
+ /* Skip setting up a temporary DTLB for destination if not aliased. */
+
+ movi a6, 0
+ movi a7, 0
+ beqz a4, 1f
+
+ /* Setup a temporary DTLB for destination. */
+
+ addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ wdtlb a6, a2
+ dsync
+
+ /* Skip setting up a temporary DTLB for source if not aliased. */
+
+1: beqz a5, 1f
+
+ /* Setup a temporary DTLB for source. */
+
+ addi a7, a5, PAGE_KERNEL
+ addi a8, a3, 1 # way1
+
+ wdtlb a7, a8
+ dsync
+
+1: __loopi a2, a4, PAGE_SIZE, 32
+
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
+
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
+
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
+
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
+
+ addi a2, a2, 32
+ addi a3, a3, 32
+
+ __endl a2, a4
+
+ /* We need to invalidate any temporary mapping! */
+
+ bnez a6, 1f
+ bnez a7, 2f
+ retw
+
+1: addi a2, a2, -PAGE_SIZE
+ idtlb a2
+ dsync
+ bnez a7, 2f
+ retw
+
+2: addi a3, a3, -PAGE_SIZE+1
+ idtlb a3
+ dsync
+
+ retw
+
+ENDPROC(copy_page_alias)
+
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * void __flush_invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
+ */
+
+ENTRY(__flush_invalidate_dcache_page_alias)
+
+ entry sp, 16
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___flush_invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
+ retw
+
+ENDPROC(__flush_invalidate_dcache_page_alias)
+
+/*
+ * void __invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
+ */
+
+ENTRY(__invalidate_dcache_page_alias)
+
+ entry sp, 16
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
+ retw
+
+ENDPROC(__invalidate_dcache_page_alias)
+#endif
+
+ENTRY(__tlbtemp_mapping_itlb)
+
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+
+ENTRY(__invalidate_icache_page_alias)
+
+ entry sp, 16
+
+ addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
+ mov a4, a2
+ witlb a6, a2
+ isync
+
+ ___invalidate_icache_page a2 a3
+
+ iitlb a4
+ isync
+ retw
+
+ENDPROC(__invalidate_icache_page_alias)
+
+#endif
+
+/* End of special treatment in tlb miss exception */
+
+ENTRY(__tlbtemp_mapping_end)
+
+#endif /* CONFIG_MMU */
+
+/*
+ * void __invalidate_icache_page(ulong start)
+ */
+
+ENTRY(__invalidate_icache_page)
+
+ entry sp, 16
+
+ ___invalidate_icache_page a2 a3
+ isync
+
+ retw
+
+ENDPROC(__invalidate_icache_page)
+
+/*
+ * void __invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__invalidate_dcache_page)
+
+ entry sp, 16
+
+ ___invalidate_dcache_page a2 a3
+ dsync
+
+ retw
+
+ENDPROC(__invalidate_dcache_page)
+
+/*
+ * void __flush_invalidate_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_invalidate_dcache_page)
+
+ entry sp, 16
+
+ ___flush_invalidate_dcache_page a2 a3
+
+ dsync
+ retw
+
+ENDPROC(__flush_invalidate_dcache_page)
+
+/*
+ * void __flush_dcache_page(ulong start)
+ */
+
+ENTRY(__flush_dcache_page)
+
+ entry sp, 16
+
+ ___flush_dcache_page a2 a3
+
+ dsync
+ retw
+
+ENDPROC(__flush_dcache_page)
+
+/*
+ * void __invalidate_icache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_icache_range)
+
+ entry sp, 16
+
+ ___invalidate_icache_range a2 a3 a4
+ isync
+
+ retw
+
+ENDPROC(__invalidate_icache_range)
+
+/*
+ * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_invalidate_dcache_range)
+
+ entry sp, 16
+
+ ___flush_invalidate_dcache_range a2 a3 a4
+ dsync
+
+ retw
+
+ENDPROC(__flush_invalidate_dcache_range)
+
+/*
+ * void __flush_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__flush_dcache_range)
+
+ entry sp, 16
+
+ ___flush_dcache_range a2 a3 a4
+ dsync
+
+ retw
+
+ENDPROC(__flush_dcache_range)
+
+/*
+ * void __invalidate_dcache_range(ulong start, ulong size)
+ */
+
+ENTRY(__invalidate_dcache_range)
+
+ entry sp, 16
+
+ ___invalidate_dcache_range a2 a3 a4
+
+ retw
+
+ENDPROC(__invalidate_dcache_range)
+
+/*
+ * void __invalidate_icache_all(void)
+ */
+
+ENTRY(__invalidate_icache_all)
+
+ entry sp, 16
+
+ ___invalidate_icache_all a2 a3
+ isync
+
+ retw
+
+ENDPROC(__invalidate_icache_all)
+
+/*
+ * void __flush_invalidate_dcache_all(void)
+ */
+
+ENTRY(__flush_invalidate_dcache_all)
+
+ entry sp, 16
+
+ ___flush_invalidate_dcache_all a2 a3
+ dsync
+
+ retw
+
+ENDPROC(__flush_invalidate_dcache_all)
+
+/*
+ * void __invalidate_dcache_all(void)
+ */
+
+ENTRY(__invalidate_dcache_all)
+
+ entry sp, 16
+
+ ___invalidate_dcache_all a2 a3
+ dsync
+
+ retw
+
+ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 000000000..470843188
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xtensa mmu stuff
+ *
+ * Extracted from init.c
+ */
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/initialize_mmu.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
+{
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte;
+ unsigned long i;
+
+ n_pages = ALIGN(n_pages, PTRS_PER_PTE);
+
+ pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
+ __func__, vaddr, n_pages);
+
+ pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+
+ for (i = 0; i < n_pages; ++i)
+ pte_clear(NULL, 0, pte + i);
+
+ for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
+ pte_t *cur_pte = pte + i;
+
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
+ BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
+ __func__, pmd, cur_pte);
+ }
+ return pte;
+}
+
+static void __init fixedrange_init(void)
+{
+ init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
+}
+#endif
+
+void __init paging_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
+ kmap_init();
+#endif
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+void init_mmu(void)
+{
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+ * registers ensures that valid values exist in the registers.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
+ * fields, the corresponding TLB must be flushed.
+ */
+ set_itlbcfg_register(0);
+ set_dtlbcfg_register(0);
+#endif
+ init_kio();
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
+ /*
+ * Update the IO area mapping in case xtensa_kio_paddr has changed
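+ * (each entry is written into TLB way 6, selected by the "+ 6" added
+ * to the vaddr argument).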
+ */
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+#endif
+}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 000000000..b43f03620
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,276 @@
+/*
+ * arch/xtensa/mm/tlb.c
+ *
+ * Logic that manipulates the Xtensa MMU. Derived from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2003 Tensilica Inc.
+ *
+ * Joe Taylor
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier
+ */
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+
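+/*
+ * The argument of the TLB-entry invalidate helpers encodes the way
+ * number in its low bits and the entry index in the VPN field, hence
+ * the w + (i << PAGE_SHIFT) computation below.
+ */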
+static inline void __flush_itlb_all (void)
+{
+ int w, i;
+
+ for (w = 0; w < ITLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_itlb_entry_no_isync(e);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+static inline void __flush_dtlb_all (void)
+{
+ int w, i;
+
+ for (w = 0; w < DTLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_dtlb_entry_no_isync(e);
+ }
+ }
+ asm volatile ("isync\n");
+}
+
+
+void local_flush_tlb_all(void)
+{
+ __flush_itlb_all();
+ __flush_dtlb_all();
+}
+
+/* If mm is current, we simply assign the current task a new ASID, thus
+ * invalidating all previous tlb entries. If mm is someone else's user mapping,
+ * we invalidate the context, so that when that user mapping is swapped in,
+ * a new context will be assigned to it.
+ */
+
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu = smp_processor_id();
+
+ if (mm == current->active_mm) {
+ unsigned long flags;
+ local_irq_save(flags);
+ mm->context.asid[cpu] = NO_CONTEXT;
+ activate_context(mm, cpu);
+ local_irq_restore(flags);
+ } else {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ mm->context.cpu = -1;
+ }
+}
+
+
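+/*
+ * Flushing a range page by page only pays off while the range is smaller
+ * than the auto-refill TLB; _TLB_ENTRIES (the larger of the two TLB
+ * sizes) is used as that cutoff by the range-flush functions below.
+ */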
+#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
+#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
+#if _ITLB_ENTRIES > _DTLB_ENTRIES
+# define _TLB_ENTRIES _ITLB_ENTRIES
+#else
+# define _TLB_ENTRIES _DTLB_ENTRIES
+#endif
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ int cpu = smp_processor_id();
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long flags;
+
+ if (mm->context.asid[cpu] == NO_CONTEXT)
+ return;
+
+ pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context.asid[cpu], start, end);
+ local_irq_save(flags);
+
+ if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
+ int oldpid = get_rasid_register();
+
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+ start &= PAGE_MASK;
+ if (vma->vm_flags & VM_EXEC)
+ while(start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ else
+ while(start < end) {
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+
+ set_rasid_register(oldpid);
+ } else {
+ local_flush_tlb_mm(mm);
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+ struct mm_struct* mm = vma->vm_mm;
+ unsigned long flags;
+ int oldpid;
+
+ if (mm->context.asid[cpu] == NO_CONTEXT)
+ return;
+
+ local_irq_save(flags);
+
+ oldpid = get_rasid_register();
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
+
+ if (vma->vm_flags & VM_EXEC)
+ invalidate_itlb_mapping(page);
+ invalidate_dtlb_mapping(page);
+
+ set_rasid_register(oldpid);
+
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ local_flush_tlb_all();
+ }
+}
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+ struct task_struct *task = get_current();
+ struct mm_struct *mm = task->mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!mm)
+ mm = task->active_mm;
+ pgd = pgd_offset(mm, vaddr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none_or_clear_bad(pmd))
+ return 0;
+ pte = pte_offset_map(pmd, vaddr);
+ if (!pte)
+ return 0;
+ return pte_val(*pte);
+}
+
+enum {
+ TLB_SUSPICIOUS = 1,
+ TLB_INSANE = 2,
+};
+
+static void tlb_insane(void)
+{
+ BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+ WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or that
+ * the PTE is marked as non-present. A non-present PTE for a page with a
+ * non-zero refcount and zero mapcount is normal for a batched TLB flush
+ * operation. A zero refcount means that the page was freed prematurely.
+ * A non-zero mapcount is unusual, but does not necessarily mean an error,
+ * so it is only marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+ unsigned tlbidx = w | (e << PAGE_SHIFT);
+ unsigned r0 = dtlb ?
+ read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+ unsigned r1 = dtlb ?
+ read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
+ unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ unsigned pte = get_pte_for_vaddr(vpn);
+ unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+ unsigned tlb_asid = r0 & ASID_MASK;
+ bool kernel = tlb_asid == 1;
+ int rc = 0;
+
+ if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+ pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+ dtlb ? 'D' : 'I', w, e, vpn,
+ kernel ? "kernel" : "user");
+ rc |= TLB_INSANE;
+ }
+
+ if (tlb_asid == mm_asid) {
+ if ((pte ^ r1) & PAGE_MASK) {
+ pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+ if (pte == 0 || !pte_present(__pte(pte))) {
+ struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+ pr_err("page refcount: %d, mapcount: %d\n",
+ page_count(p),
+ page_mapcount(p));
+ if (!page_count(p))
+ rc |= TLB_INSANE;
+ else if (page_mapcount(p))
+ rc |= TLB_SUSPICIOUS;
+ } else {
+ rc |= TLB_INSANE;
+ }
+ }
+ }
+ return rc;
+}
+
+void check_tlb_sanity(void)
+{
+ unsigned long flags;
+ unsigned w, e;
+ int bug = 0;
+
+ local_irq_save(flags);
+ for (w = 0; w < DTLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, true);
+ for (w = 0; w < ITLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, false);
+ if (bug & TLB_INSANE)
+ tlb_insane();
+ if (bug & TLB_SUSPICIOUS)
+ tlb_suspicious();
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
new file mode 100644
index 000000000..f559b9ffb
--- /dev/null
+++ b/arch/xtensa/oprofile/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
new file mode 100644
index 000000000..8f952034e
--- /dev/null
+++ b/arch/xtensa/oprofile/backtrace.c
@@ -0,0 +1,27 @@
+/**
+ * @file backtrace.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/oprofile.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static int xtensa_backtrace_cb(struct stackframe *frame, void *data)
+{
+ oprofile_add_trace(frame->pc);
+ return 0;
+}
+
+void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ if (user_mode(regs))
+ xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL);
+ else
+ xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb,
+ xtensa_backtrace_cb, NULL);
+}
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
new file mode 100644
index 000000000..a67eea379
--- /dev/null
+++ b/arch/xtensa/oprofile/init.c
@@ -0,0 +1,26 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+
+extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ ops->backtrace = xtensa_backtrace;
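+ /*
+ * There is no Xtensa performance counter support here: returning
+ * -ENODEV makes oprofile fall back to its timer interrupt mode while
+ * still using the backtrace callback set above.
+ */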
+ return -ENODEV;
+}
+
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/xtensa/platforms/iss/Makefile b/arch/xtensa/platforms/iss/Makefile
new file mode 100644
index 000000000..b3e89291c
--- /dev/null
+++ b/arch/xtensa/platforms/iss/Makefile
@@ -0,0 +1,10 @@
+# $Id: Makefile,v 1.1.1.1 2002/08/28 16:10:14 aroll Exp $
+#
+# Makefile for the Xtensa Instruction Set Simulator (ISS)
+# "prom monitor" library routines under Linux.
+#
+
+obj-y = setup.o
+obj-$(CONFIG_TTY) += console.o
+obj-$(CONFIG_NET) += network.o
+obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
new file mode 100644
index 000000000..e7faea3d7
--- /dev/null
+++ b/arch/xtensa/platforms/iss/console.c
@@ -0,0 +1,268 @@
+/*
+ * arch/xtensa/platforms/iss/console.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ * Authors Christian Zankel, Joe Taylor
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/param.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+
+#include <platform/simcall.h>
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define SERIAL_MAX_NUM_LINES 1
+#define SERIAL_TIMER_VALUE (HZ / 10)
+
+static struct tty_driver *serial_driver;
+static struct tty_port serial_port;
+static struct timer_list serial_timer;
+
+static DEFINE_SPINLOCK(timer_lock);
+
+static char *serial_version = "0.1";
+static char *serial_name = "ISS serial driver";
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking in its async structure into
+ * the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+
+static void rs_poll(struct timer_list *);
+
+static int rs_open(struct tty_struct *tty, struct file * filp)
+{
+ tty->port = &serial_port;
+ spin_lock_bh(&timer_lock);
+ if (tty->count == 1) {
+ timer_setup(&serial_timer, rs_poll, 0);
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ }
+ spin_unlock_bh(&timer_lock);
+
+ return 0;
+}
+
+
+/*
+ * ------------------------------------------------------------
+ * iss_serial_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * async structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void rs_close(struct tty_struct *tty, struct file * filp)
+{
+ spin_lock_bh(&timer_lock);
+ if (tty->count == 1)
+ del_timer_sync(&serial_timer);
+ spin_unlock_bh(&timer_lock);
+}
+
+
+static int rs_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ /* see drivers/char/serialX.c to reference original version */
+
+ simc_write(1, buf, count);
+ return count;
+}
+
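+/*
+ * The simulated serial port has no receive interrupt, so rs_open() arms a
+ * timer that polls the simulator for input every SERIAL_TIMER_VALUE
+ * jiffies for as long as the port is open.
+ */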
+static void rs_poll(struct timer_list *unused)
+{
+ struct tty_port *port = &serial_port;
+ int i = 0;
+ int rd = 1;
+ unsigned char c;
+
+ spin_lock(&timer_lock);
+
+ while (simc_poll(0)) {
+ rd = simc_read(0, &c, 1);
+ if (rd <= 0)
+ break;
+ tty_insert_flip_char(port, c, TTY_NORMAL);
+ i++;
+ }
+
+ if (i)
+ tty_flip_buffer_push(port);
+ if (rd)
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ spin_unlock(&timer_lock);
+}
+
+
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ return rs_write(tty, &ch, 1);
+}
+
+static void rs_flush_chars(struct tty_struct *tty)
+{
+}
+
+static int rs_write_room(struct tty_struct *tty)
+{
+ /* Let's say iss can always accept 2K characters.. */
+ return 2 * 1024;
+}
+
+static int rs_chars_in_buffer(struct tty_struct *tty)
+{
+ /* the iss doesn't buffer characters */
+ return 0;
+}
+
+static void rs_hangup(struct tty_struct *tty)
+{
+ /* Stub, once again.. */
+}
+
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* Stub, once again.. */
+}
+
+static int rs_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
+ return 0;
+}
+
+static const struct tty_operations serial_ops = {
+ .open = rs_open,
+ .close = rs_close,
+ .write = rs_write,
+ .put_char = rs_put_char,
+ .flush_chars = rs_flush_chars,
+ .write_room = rs_write_room,
+ .chars_in_buffer = rs_chars_in_buffer,
+ .hangup = rs_hangup,
+ .wait_until_sent = rs_wait_until_sent,
+ .proc_show = rs_proc_show,
+};
+
+int __init rs_init(void)
+{
+ int ret;
+
+ serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
+ if (!serial_driver)
+ return -ENOMEM;
+
+ tty_port_init(&serial_port);
+
+ pr_info("%s %s\n", serial_name, serial_version);
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->driver_name = "iss_serial";
+ serial_driver->name = "ttyS";
+ serial_driver->major = TTY_MAJOR;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+
+ tty_set_operations(serial_driver, &serial_ops);
+ tty_port_link_device(&serial_port, serial_driver, 0);
+
+ ret = tty_register_driver(serial_driver);
+ if (ret) {
+ pr_err("Couldn't register serial driver\n");
+ tty_driver_kref_put(serial_driver);
+ tty_port_destroy(&serial_port);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+
+static __exit void rs_exit(void)
+{
+ int error;
+
+ if ((error = tty_unregister_driver(serial_driver)))
+ pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
+ error);
+ put_tty_driver(serial_driver);
+ tty_port_destroy(&serial_port);
+}
+
+
+/* We use `late_initcall' instead of just `__initcall' as a workaround for
+ * the fact that (1) simcons_tty_init can't be called before tty_init,
+ * (2) tty_init is called via `module_init', (3) if statically linked,
+ * module_init == device_init, and (4) there's no ordering of init lists.
+ * We can do this easily because simcons is always statically linked, but
+ * other tty drivers that depend on tty_init and which must use
+ * `module_init' to declare their init routines are likely to be broken.
+ */
+
+late_initcall(rs_init);
+
+
+#ifdef CONFIG_SERIAL_CONSOLE
+
+static void iss_console_write(struct console *co, const char *s, unsigned count)
+{
+ if (s != NULL && *s != '\0') {
+ int len = strlen(s);
+
+ simc_write(1, s, count < len ? count : len);
+ }
+}
+
+static struct tty_driver* iss_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return serial_driver;
+}
+
+
+static struct console sercons = {
+ .name = "ttyS",
+ .write = iss_console_write,
+ .device = iss_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1
+};
+
+static int __init iss_console_init(void)
+{
+ register_console(&sercons);
+ return 0;
+}
+
+console_initcall(iss_console_init);
+
+#endif /* CONFIG_SERIAL_CONSOLE */
+
diff --git a/arch/xtensa/platforms/iss/include/platform/serial.h b/arch/xtensa/platforms/iss/include/platform/serial.h
new file mode 100644
index 000000000..16aec542d
--- /dev/null
+++ b/arch/xtensa/platforms/iss/include/platform/serial.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_ISS_SERIAL_H
+#define __ASM_XTENSA_ISS_SERIAL_H
+
+/* Have no meaning on ISS, but needed for 8250_early.c */
+#define BASE_BAUD 0
+
+#endif /* __ASM_XTENSA_ISS_SERIAL_H */
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
new file mode 100644
index 000000000..2ba45858e
--- /dev/null
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -0,0 +1,142 @@
+/*
+ * include/asm-xtensa/platform-iss/simcall.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
+#define _XTENSA_PLATFORM_ISS_SIMCALL_H
+
+
+/*
+ * System call like services offered by the simulator host.
+ */
+
+#define SYS_nop 0 /* unused */
+#define SYS_exit 1 /*x*/
+#define SYS_fork 2
+#define SYS_read 3 /*x*/
+#define SYS_write 4 /*x*/
+#define SYS_open 5 /*x*/
+#define SYS_close 6 /*x*/
+#define SYS_rename 7 /*x 38 - waitpid */
+#define SYS_creat 8 /*x*/
+#define SYS_link 9 /*x (not implemented on WIN32) */
+#define SYS_unlink 10 /*x*/
+#define SYS_execv 11 /* n/a - execve */
+#define SYS_execve 12 /* 11 - chdir */
+#define SYS_pipe 13 /* 42 - time */
+#define SYS_stat 14 /* 106 - mknod */
+#define SYS_chmod 15
+#define SYS_chown 16 /* 202 - lchown */
+#define SYS_utime 17 /* 30 - break */
+#define SYS_wait 18 /* n/a - oldstat */
+#define SYS_lseek 19 /*x*/
+#define SYS_getpid 20
+#define SYS_isatty 21 /* n/a - mount */
+#define SYS_fstat 22 /* 108 - oldumount */
+#define SYS_time 23 /* 13 - setuid */
+#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
+#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
+#define SYS_socket 26
+#define SYS_sendto 27
+#define SYS_recvfrom 28
+#define SYS_select_one 29 /* not compatible with select, one file descriptor at a time */
+#define SYS_bind 30
+#define SYS_ioctl 31
+
+#define SYS_iss_argc 1000 /* returns value of argc */
+#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
+#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
+
+/*
+ * SYS_select_one specifiers
+ */
+
+#define XTISS_SELECT_ONE_READ 1
+#define XTISS_SELECT_ONE_WRITE 2
+#define XTISS_SELECT_ONE_EXCEPT 3
+
+static int errno;
+
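+/*
+ * Issue a simulator call: the call number and arguments are passed in
+ * registers a2..a5; the simulator returns the result in a2 and the host
+ * errno in a3, which is mirrored into the static errno above.
+ */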
+static inline int __simc(int a, int b, int c, int d)
+{
+ int ret;
+ register int a1 asm("a2") = a;
+ register int b1 asm("a3") = b;
+ register int c1 asm("a4") = c;
+ register int d1 asm("a5") = d;
+ __asm__ __volatile__ (
+ "simcall\n"
+ "mov %0, a2\n"
+ "mov %1, a3\n"
+ : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
+ : "r"(c1), "r"(d1)
+ : "memory");
+ return ret;
+}
+
+static inline int simc_exit(int exit_code)
+{
+ return __simc(SYS_exit, exit_code, 0, 0);
+}
+
+static inline int simc_open(const char *file, int flags, int mode)
+{
+ return __simc(SYS_open, (int) file, flags, mode);
+}
+
+static inline int simc_close(int fd)
+{
+ return __simc(SYS_close, fd, 0, 0);
+}
+
+static inline int simc_ioctl(int fd, int request, void *arg)
+{
+ return __simc(SYS_ioctl, fd, request, (int) arg);
+}
+
+static inline int simc_read(int fd, void *buf, size_t count)
+{
+ return __simc(SYS_read, fd, (int) buf, count);
+}
+
+static inline int simc_write(int fd, const void *buf, size_t count)
+{
+ return __simc(SYS_write, fd, (int) buf, count);
+}
+
+static inline int simc_poll(int fd)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+
+ return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
+}
+
+static inline int simc_lseek(int fd, uint32_t off, int whence)
+{
+ return __simc(SYS_lseek, fd, off, whence);
+}
+
+static inline int simc_argc(void)
+{
+ return __simc(SYS_iss_argc, 0, 0, 0);
+}
+
+static inline int simc_argv_size(void)
+{
+ return __simc(SYS_iss_argv_size, 0, 0, 0);
+}
+
+static inline void simc_argv(void *buf)
+{
+ __simc(SYS_iss_set_argv, (int)buf, 0, 0);
+}
+
+#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
+
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
new file mode 100644
index 000000000..d027dddc4
--- /dev/null
+++ b/arch/xtensa/platforms/iss/network.c
@@ -0,0 +1,683 @@
+/*
+ *
+ * arch/xtensa/platforms/iss/network.c
+ *
+ * Simulated Ethernet interface for the ISS platform.
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Based on work from the UML team.
+ *
+ * Copyright 2005 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/if_ether.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/if_tun.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/bootmem.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/platform_device.h>
+
+#include <platform/simcall.h>
+
+#define DRIVER_NAME "iss-netdev"
+#define ETH_MAX_PACKET 1500
+#define ETH_HEADER_OTHER 14
+#define ISS_NET_TIMER_VALUE (HZ / 10)
+
+
+static DEFINE_SPINLOCK(opened_lock);
+static LIST_HEAD(opened);
+
+static DEFINE_SPINLOCK(devices_lock);
+static LIST_HEAD(devices);
+
+/* ------------------------------------------------------------------------- */
+
+/* We currently only support the TUNTAP transport protocol. */
+
+#define TRANSPORT_TUNTAP_NAME "tuntap"
+#define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET
+
+struct tuntap_info {
+ char dev_name[IFNAMSIZ];
+ int fd;
+};
+
+/* ------------------------------------------------------------------------- */
+
+
+/* This structure contains our private information for the driver. */
+
+struct iss_net_private {
+ struct list_head device_list;
+ struct list_head opened_list;
+
+ spinlock_t lock;
+ struct net_device *dev;
+ struct platform_device pdev;
+ struct timer_list tl;
+ struct net_device_stats stats;
+
+ struct timer_list timer;
+ unsigned int timer_val;
+
+ int index;
+ int mtu;
+
+ struct {
+ union {
+ struct tuntap_info tuntap;
+ } info;
+
+ int (*open)(struct iss_net_private *lp);
+ void (*close)(struct iss_net_private *lp);
+ int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
+ int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
+ unsigned short (*protocol)(struct sk_buff *skb);
+ int (*poll)(struct iss_net_private *lp);
+ } tp;
+
+};
+
+/* ================================ HELPERS ================================ */
+
+
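+/*
+ * Split a comma-separated interface spec into the fields requested by the
+ * caller, e.g. split_if_spec("00:11:22:33:44:55,tap0", &mac, &name).
+ * Empty fields leave the corresponding argument untouched. Returns a
+ * pointer to any text left over after the last requested field, or NULL
+ * otherwise.
+ */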
+static char *split_if_spec(char *str, ...)
+{
+ char **arg, *end;
+ va_list ap;
+
+ va_start(ap, str);
+ while ((arg = va_arg(ap, char**)) != NULL) {
+ if (*str == '\0') {
+ va_end(ap);
+ return NULL;
+ }
+ end = strchr(str, ',');
+ if (end != str)
+ *arg = str;
+ if (end == NULL) {
+ va_end(ap);
+ return NULL;
+ }
+ *end++ = '\0';
+ str = end;
+ }
+ va_end(ap);
+ return str;
+}
+
+/* Set Ethernet address of the specified device. */
+
+static void setup_etheraddr(struct net_device *dev, char *str)
+{
+ unsigned char *addr = dev->dev_addr;
+
+ if (str == NULL)
+ goto random;
+
+ if (!mac_pton(str, addr)) {
+ pr_err("%s: failed to parse '%s' as an ethernet address\n",
+ dev->name, str);
+ goto random;
+ }
+ if (is_multicast_ether_addr(addr)) {
+ pr_err("%s: attempt to assign a multicast ethernet address\n",
+ dev->name);
+ goto random;
+ }
+ if (!is_valid_ether_addr(addr)) {
+ pr_err("%s: attempt to assign an invalid ethernet address\n",
+ dev->name);
+ goto random;
+ }
+ if (!is_local_ether_addr(addr))
+ pr_warn("%s: assigning a globally valid ethernet address\n",
+ dev->name);
+ return;
+
+random:
+ pr_info("%s: choosing a random ethernet address\n",
+ dev->name);
+ eth_hw_addr_random(dev);
+}
+
+/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
+
+static int tuntap_open(struct iss_net_private *lp)
+{
+ struct ifreq ifr;
+ char *dev_name = lp->tp.info.tuntap.dev_name;
+ int err = -EINVAL;
+ int fd;
+
+ fd = simc_open("/dev/net/tun", 02, 0); /* O_RDWR */
+ if (fd < 0) {
+ pr_err("%s: failed to open /dev/net/tun, returned %d (errno = %d)\n",
+ lp->dev->name, fd, errno);
+ return fd;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+ strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
+
+ err = simc_ioctl(fd, TUNSETIFF, &ifr);
+ if (err < 0) {
+ pr_err("%s: failed to set interface %s, returned %d (errno = %d)\n",
+ lp->dev->name, dev_name, err, errno);
+ simc_close(fd);
+ return err;
+ }
+
+ lp->tp.info.tuntap.fd = fd;
+ return err;
+}
+
+static void tuntap_close(struct iss_net_private *lp)
+{
+ simc_close(lp->tp.info.tuntap.fd);
+ lp->tp.info.tuntap.fd = -1;
+}
+
+static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb)
+{
+ return simc_read(lp->tp.info.tuntap.fd,
+ (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
+}
+
+static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
+{
+ return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
+}
+
+unsigned short tuntap_protocol(struct sk_buff *skb)
+{
+ return eth_type_trans(skb, skb->dev);
+}
+
+static int tuntap_poll(struct iss_net_private *lp)
+{
+ return simc_poll(lp->tp.info.tuntap.fd);
+}
+
+/*
+ * ethX=tuntap,[mac address],device name
+ */
+
+static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
+{
+ struct net_device *dev = lp->dev;
+ char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
+
+ /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
+
+ if (strncmp(init, TRANSPORT_TUNTAP_NAME,
+ sizeof(TRANSPORT_TUNTAP_NAME) - 1))
+ return 0;
+
+ init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
+ if (*init == ',') {
+ rem = split_if_spec(init + 1, &mac_str, &dev_name);
+ if (rem != NULL) {
+ pr_err("%s: extra garbage on specification : '%s'\n",
+ dev->name, rem);
+ return 0;
+ }
+ } else if (*init != '\0') {
+ pr_err("%s: invalid argument: %s. Skipping device!\n",
+ dev->name, init);
+ return 0;
+ }
+
+ if (!dev_name) {
+ pr_err("%s: missing tuntap device name\n", dev->name);
+ return 0;
+ }
+
+ strlcpy(lp->tp.info.tuntap.dev_name, dev_name,
+ sizeof(lp->tp.info.tuntap.dev_name));
+
+ setup_etheraddr(dev, mac_str);
+
+ lp->mtu = TRANSPORT_TUNTAP_MTU;
+
+ lp->tp.info.tuntap.fd = -1;
+
+ lp->tp.open = tuntap_open;
+ lp->tp.close = tuntap_close;
+ lp->tp.read = tuntap_read;
+ lp->tp.write = tuntap_write;
+ lp->tp.protocol = tuntap_protocol;
+ lp->tp.poll = tuntap_poll;
+
+ return 1;
+}
+
+/* ================================ ISS NET ================================ */
+
+static int iss_net_rx(struct net_device *dev)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ int pkt_len;
+ struct sk_buff *skb;
+
+ /* Check if there is any new data. */
+
+ if (lp->tp.poll(lp) == 0)
+ return 0;
+
+ /* Try to allocate memory, if it fails, try again next round. */
+
+ skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
+ if (skb == NULL) {
+ lp->stats.rx_dropped++;
+ return 0;
+ }
+
+ skb_reserve(skb, 2);
+
+ /* Setup skb */
+
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ pkt_len = lp->tp.read(lp, &skb);
+ skb_put(skb, pkt_len);
+
+ if (pkt_len > 0) {
+ skb_trim(skb, pkt_len);
+ skb->protocol = lp->tp.protocol(skb);
+
+ lp->stats.rx_bytes += skb->len;
+ lp->stats.rx_packets++;
+ netif_rx_ni(skb);
+ return pkt_len;
+ }
+ kfree_skb(skb);
+ return pkt_len;
+}
+
+static int iss_net_poll(void)
+{
+ struct list_head *ele;
+ int err, ret = 0;
+
+ spin_lock(&opened_lock);
+
+ list_for_each(ele, &opened) {
+ struct iss_net_private *lp;
+
+ lp = list_entry(ele, struct iss_net_private, opened_list);
+
+ if (!netif_running(lp->dev))
+ break;
+
+ spin_lock(&lp->lock);
+
+ while ((err = iss_net_rx(lp->dev)) > 0)
+ ret++;
+
+ spin_unlock(&lp->lock);
+
+ if (err < 0) {
+ pr_err("Device '%s' read returned %d, shutting it down\n",
+ lp->dev->name, err);
+ dev_close(lp->dev);
+ } else {
+ /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
+ }
+ }
+
+ spin_unlock(&opened_lock);
+ return ret;
+}
+
+
+static void iss_net_timer(struct timer_list *t)
+{
+ struct iss_net_private *lp = from_timer(lp, t, timer);
+
+ iss_net_poll();
+ spin_lock(&lp->lock);
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+ spin_unlock(&lp->lock);
+}
+
+
+static int iss_net_open(struct net_device *dev)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&lp->lock);
+
+ err = lp->tp.open(lp);
+ if (err < 0)
+ goto out;
+
+ netif_start_queue(dev);
+
+ /* clear buffer - it can happen that the host side of the interface
+ * is full when we get here. In this case, new data is never queued,
+ * SIGIOs never arrive, and the net never works.
+ */
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+ spin_unlock_bh(&lp->lock);
+ spin_lock_bh(&opened_lock);
+ list_add(&lp->opened_list, &opened);
+ spin_unlock_bh(&opened_lock);
+ spin_lock_bh(&lp->lock);
+
+ timer_setup(&lp->timer, iss_net_timer, 0);
+ lp->timer_val = ISS_NET_TIMER_VALUE;
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+out:
+ spin_unlock_bh(&lp->lock);
+ return err;
+}
+
+static int iss_net_close(struct net_device *dev)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+ spin_lock_bh(&lp->lock);
+
+ spin_lock(&opened_lock);
+ list_del(&opened);
+ spin_unlock(&opened_lock);
+
+ del_timer_sync(&lp->timer);
+
+ lp->tp.close(lp);
+
+ spin_unlock_bh(&lp->lock);
+ return 0;
+}
+
+static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ int len;
+
+ netif_stop_queue(dev);
+ spin_lock_bh(&lp->lock);
+
+ len = lp->tp.write(lp, &skb);
+
+ if (len == skb->len) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += skb->len;
+ netif_trans_update(dev);
+ netif_start_queue(dev);
+
+ /* this is normally done in the interrupt when tx finishes */
+ netif_wake_queue(dev);
+
+ } else if (len == 0) {
+ netif_start_queue(dev);
+ lp->stats.tx_dropped++;
+
+ } else {
+ netif_start_queue(dev);
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ }
+
+ spin_unlock_bh(&lp->lock);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ return &lp->stats;
+}
+
+static void iss_net_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void iss_net_tx_timeout(struct net_device *dev)
+{
+}
+
+static int iss_net_set_mac(struct net_device *dev, void *addr)
+{
+ struct iss_net_private *lp = netdev_priv(dev);
+ struct sockaddr *hwaddr = addr;
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+ spin_lock_bh(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+ spin_unlock_bh(&lp->lock);
+ return 0;
+}
+
+static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return -EINVAL;
+}
+
+void iss_net_user_timer_expire(struct timer_list *unused)
+{
+}
+
+
+static struct platform_driver iss_net_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int driver_registered;
+
+static const struct net_device_ops iss_netdev_ops = {
+ .ndo_open = iss_net_open,
+ .ndo_stop = iss_net_close,
+ .ndo_get_stats = iss_net_get_stats,
+ .ndo_start_xmit = iss_net_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = iss_net_change_mtu,
+ .ndo_set_mac_address = iss_net_set_mac,
+ .ndo_tx_timeout = iss_net_tx_timeout,
+ .ndo_set_rx_mode = iss_net_set_multicast_list,
+};
+
+static int iss_net_configure(int index, char *init)
+{
+ struct net_device *dev;
+ struct iss_net_private *lp;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*lp));
+ if (dev == NULL) {
+ pr_err("eth_configure: failed to allocate device\n");
+ return 1;
+ }
+
+ /* Initialize private element. */
+
+ lp = netdev_priv(dev);
+ *lp = (struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+ .dev = dev,
+ .index = index,
+ };
+
+ spin_lock_init(&lp->lock);
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+ * and fail.
+ */
+ snprintf(dev->name, sizeof(dev->name), "eth%d", index);
+
+ /*
+ * Try all transport protocols.
+ * Note: more protocols can be added by adding '&& !X_init(lp, eth)'.
+ */
+
+ if (!tuntap_probe(lp, index, init)) {
+ pr_err("%s: invalid arguments. Skipping device!\n",
+ dev->name);
+ goto errout;
+ }
+
+ pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr);
+
+ /* sysfs register */
+
+ if (!driver_registered) {
+ platform_driver_register(&iss_net_driver);
+ driver_registered = 1;
+ }
+
+ spin_lock(&devices_lock);
+ list_add(&lp->device_list, &devices);
+ spin_unlock(&devices_lock);
+
+ lp->pdev.id = index;
+ lp->pdev.name = DRIVER_NAME;
+ platform_device_register(&lp->pdev);
+ SET_NETDEV_DEV(dev, &lp->pdev.dev);
+
+ dev->netdev_ops = &iss_netdev_ops;
+ dev->mtu = lp->mtu;
+ dev->watchdog_timeo = (HZ >> 1);
+ dev->irq = -1;
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+
+ if (err) {
+ pr_err("%s: error registering net device!\n", dev->name);
+ /* XXX: should we call ->remove() here? */
+ free_netdev(dev);
+ return 1;
+ }
+
+ timer_setup(&lp->tl, iss_net_user_timer_expire, 0);
+
+ return 0;
+
+errout:
+ /* FIXME: unregister; free, etc.. */
+ return -EIO;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* Filled in during early boot */
+
+struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
+
+struct iss_net_init {
+ struct list_head list;
+ char *init; /* init string */
+ int index;
+};
+
+/*
+ * Parse the command line and look for 'ethX=...' fields, and register all
+ * those fields. They will be later initialized in iss_net_init.
+ */
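+/* Example: eth0=tuntap,00:11:22:33:44:55,tap0 (the MAC field may be empty). */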
+
+static int __init iss_net_setup(char *str)
+{
+ struct iss_net_private *device = NULL;
+ struct iss_net_init *new;
+ struct list_head *ele;
+ char *end;
+ int rc;
+ unsigned n;
+
+ end = strchr(str, '=');
+ if (!end) {
+ pr_err("Expected '=' after device number\n");
+ return 1;
+ }
+ *end = 0;
+ rc = kstrtouint(str, 0, &n);
+ *end = '=';
+ if (rc < 0) {
+ pr_err("Failed to parse '%s'\n", str);
+ return 1;
+ }
+ str = end;
+
+ spin_lock(&devices_lock);
+
+ list_for_each(ele, &devices) {
+ device = list_entry(ele, struct iss_net_private, device_list);
+ if (device->index == n)
+ break;
+ }
+
+ spin_unlock(&devices_lock);
+
+ if (device && device->index == n) {
+ pr_err("Device %u already configured\n", n);
+ return 1;
+ }
+
+ new = alloc_bootmem(sizeof(*new));
+ if (new == NULL) {
+ pr_err("Alloc_bootmem failed\n");
+ return 1;
+ }
+
+ INIT_LIST_HEAD(&new->list);
+ new->index = n;
+ new->init = str + 1;
+
+ list_add_tail(&new->list, &eth_cmd_line);
+ return 1;
+}
+
+__setup("eth", iss_net_setup);
+
+/*
+ * Initialize all ISS Ethernet devices previously registered in iss_net_setup.
+ */
+
+static int iss_net_init(void)
+{
+ struct list_head *ele, *next;
+
+ /* Walk through all Ethernet devices specified in the command line. */
+
+ list_for_each_safe(ele, next, &eth_cmd_line) {
+ struct iss_net_init *eth;
+ eth = list_entry(ele, struct iss_net_init, list);
+ iss_net_configure(eth->index, eth->init);
+ }
+
+ return 1;
+}
+device_initcall(iss_net_init);
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
new file mode 100644
index 000000000..58709e89a
--- /dev/null
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -0,0 +1,106 @@
+/*
+ *
+ * arch/xtensa/platform-iss/setup.c
+ *
+ * Platform specific initialization.
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2005 Tensilica Inc.
+ * Copyright 2017 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/bootmem.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/notifier.h>
+
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <asm/setup.h>
+
+#include <platform/simcall.h>
+
+
+void __init platform_init(bp_tag_t* bootparam)
+{
+}
+
+void platform_halt(void)
+{
+ pr_info(" ** Called platform_halt() **\n");
+ simc_exit(0);
+}
+
+void platform_power_off(void)
+{
+ pr_info(" ** Called platform_power_off() **\n");
+ simc_exit(0);
+}
+
+void platform_restart(void)
+{
+ /* Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector. */
+ cpu_reset();
+ /* control never gets here */
+}
+
+void platform_heartbeat(void)
+{
+}
+
+static int
+iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ simc_exit(1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block iss_panic_block = {
+ .notifier_call = iss_panic_event,
+};
+
+void __init platform_setup(char **p_cmdline)
+{
+ static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+ static char cmdline[COMMAND_LINE_SIZE] __initdata;
+ int argc = simc_argc();
+ int argv_size = simc_argv_size();
+
+ if (argc > 1) {
+ if (argv_size > sizeof(argv)) {
+ pr_err("%s: command line too long: argv_size = %d\n",
+ __func__, argv_size);
+ } else {
+ int i;
+
+ cmdline[0] = 0;
+ simc_argv((void *)argv);
+
+ for (i = 1; i < argc; ++i) {
+ if (i > 1)
+ strcat(cmdline, " ");
+ strcat(cmdline, argv[i]);
+ }
+ *p_cmdline = cmdline;
+ }
+ }
+
+ atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
+}
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
new file mode 100644
index 000000000..026211e7a
--- /dev/null
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -0,0 +1,372 @@
+/*
+ * arch/xtensa/platforms/iss/simdisk.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2013 Tensilica Inc.
+ * Author: Victor Prupis
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <platform/simcall.h>
+
+#define SIMDISK_MAJOR 240
+#define SIMDISK_MINORS 1
+#define MAX_SIMDISK_COUNT 10
+
+struct simdisk {
+ const char *filename;
+ spinlock_t lock;
+ struct request_queue *queue;
+ struct gendisk *gd;
+ struct proc_dir_entry *procfile;
+ int users;
+ unsigned long size;
+ int fd;
+};
+
+
+static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
+module_param(simdisk_count, int, S_IRUGO);
+MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");
+
+static int n_files;
+static const char *filename[MAX_SIMDISK_COUNT] = {
+#ifdef CONFIG_SIMDISK0_FILENAME
+ CONFIG_SIMDISK0_FILENAME,
+#ifdef CONFIG_SIMDISK1_FILENAME
+ CONFIG_SIMDISK1_FILENAME,
+#endif
+#endif
+};
+
+static int simdisk_param_set_filename(const char *val,
+ const struct kernel_param *kp)
+{
+ if (n_files < ARRAY_SIZE(filename))
+ filename[n_files++] = val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static const struct kernel_param_ops simdisk_param_ops_filename = {
+ .set = simdisk_param_set_filename,
+};
+module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
+MODULE_PARM_DESC(filename, "Backing storage filename.");
+
+static int simdisk_major = SIMDISK_MAJOR;
+
+static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
+ unsigned long nsect, char *buffer, int write)
+{
+ unsigned long offset = sector << SECTOR_SHIFT;
+ unsigned long nbytes = nsect << SECTOR_SHIFT;
+
+ if (offset > dev->size || dev->size - offset < nbytes) {
+ pr_notice("Beyond-end %s (%ld %ld)\n",
+ write ? "write" : "read", offset, nbytes);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ while (nbytes > 0) {
+ unsigned long io;
+
+ simc_lseek(dev->fd, offset, SEEK_SET);
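+ /*
+ * Touch the buffer before the simcall. This is presumably here
+ * to fault the page in first, since the host-side simcall
+ * accesses guest memory directly and cannot service a page
+ * fault itself (an inference from context; the code itself
+ * carries no explanation).
+ */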
+ READ_ONCE(*buffer);
+ if (write)
+ io = simc_write(dev->fd, buffer, nbytes);
+ else
+ io = simc_read(dev->fd, buffer, nbytes);
+ if (io == -1) {
+ pr_err("SIMDISK: IO error %d\n", errno);
+ break;
+ }
+ buffer += io;
+ offset += io;
+ nbytes -= io;
+ }
+ spin_unlock(&dev->lock);
+}
+
+static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct simdisk *dev = q->queuedata;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
+ unsigned len = bvec.bv_len >> SECTOR_SHIFT;
+
+ simdisk_transfer(dev, sector, len, buffer,
+ bio_data_dir(bio) == WRITE);
+ sector += len;
+ kunmap_atomic(buffer);
+ }
+
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
+}
+
+static int simdisk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct simdisk *dev = bdev->bd_disk->private_data;
+
+ spin_lock(&dev->lock);
+ if (!dev->users)
+ check_disk_change(bdev);
+ ++dev->users;
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static void simdisk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct simdisk *dev = disk->private_data;
+
+ spin_lock(&dev->lock);
+ --dev->users;
+ spin_unlock(&dev->lock);
+}
+
+static const struct block_device_operations simdisk_ops = {
+ .owner = THIS_MODULE,
+ .open = simdisk_open,
+ .release = simdisk_release,
+};
+
+static struct simdisk *sddev;
+static struct proc_dir_entry *simdisk_procdir;
+
+static int simdisk_attach(struct simdisk *dev, const char *filename)
+{
+ int err = 0;
+
+ filename = kstrdup(filename, GFP_KERNEL);
+ if (filename == NULL)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+
+ if (dev->fd != -1) {
+ err = -EBUSY;
+ goto out;
+ }
+ dev->fd = simc_open(filename, O_RDWR, 0);
+ if (dev->fd == -1) {
+ pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
+ err = -ENODEV;
+ goto out;
+ }
+ dev->size = simc_lseek(dev->fd, 0, SEEK_END);
+ set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
+ dev->filename = filename;
+ pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
+out:
+ if (err)
+ kfree(filename);
+ spin_unlock(&dev->lock);
+
+ return err;
+}
+
+static int simdisk_detach(struct simdisk *dev)
+{
+ int err = 0;
+
+ spin_lock(&dev->lock);
+
+ if (dev->users != 0) {
+ err = -EBUSY;
+ } else if (dev->fd != -1) {
+ if (simc_close(dev->fd)) {
+ pr_err("SIMDISK: error closing %s: %d\n",
+ dev->filename, errno);
+ err = -EIO;
+ } else {
+ pr_info("SIMDISK: %s detached from %s\n",
+ dev->gd->disk_name, dev->filename);
+ dev->fd = -1;
+ kfree(dev->filename);
+ dev->filename = NULL;
+ }
+ }
+ spin_unlock(&dev->lock);
+ return err;
+}
+
+static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct simdisk *dev = PDE_DATA(file_inode(file));
+ const char *s = dev->filename;
+
+ if (s) {
+ ssize_t n = simple_read_from_buffer(buf, size, ppos,
+ s, strlen(s));
+ if (n < 0)
+ return n;
+ buf += n;
+ size -= n;
+ }
+ return simple_read_from_buffer(buf, size, ppos, "\n", 1);
+}
+
+static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp = memdup_user_nul(buf, count);
+ struct simdisk *dev = PDE_DATA(file_inode(file));
+ int err;
+
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ err = simdisk_detach(dev);
+ if (err != 0)
+ goto out_free;
+
+ if (count > 0 && tmp[count - 1] == '\n')
+ tmp[count - 1] = 0;
+
+ if (tmp[0])
+ err = simdisk_attach(dev, tmp);
+
+ if (err == 0)
+ err = count;
+out_free:
+ kfree(tmp);
+ return err;
+}
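+
+/*
+ * Illustrative usage from userspace (paths follow the proc entries
+ * created in simdisk_setup() below):
+ *
+ * cat /proc/simdisk/0 # show the current backing file
+ * echo /tmp/disk.img > /proc/simdisk/0 # detach old, attach new file
+ * echo > /proc/simdisk/0 # bare newline: detach only
+ */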
+
+static const struct file_operations fops = {
+ .read = proc_read_simdisk,
+ .write = proc_write_simdisk,
+ .llseek = default_llseek,
+};
+
+static int __init simdisk_setup(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ dev->fd = -1;
+ dev->filename = NULL;
+ spin_lock_init(&dev->lock);
+ dev->users = 0;
+
+ dev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dev->queue == NULL) {
+ pr_err("blk_alloc_queue failed\n");
+ goto out_alloc_queue;
+ }
+
+ blk_queue_make_request(dev->queue, simdisk_make_request);
+ dev->queue->queuedata = dev;
+
+ dev->gd = alloc_disk(SIMDISK_MINORS);
+ if (dev->gd == NULL) {
+ pr_err("alloc_disk failed\n");
+ goto out_alloc_disk;
+ }
+ dev->gd->major = simdisk_major;
+ dev->gd->first_minor = which;
+ dev->gd->fops = &simdisk_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
+ set_capacity(dev->gd, 0);
+ add_disk(dev->gd);
+
+ dev->procfile = proc_create_data(tmp, 0644, procdir, &fops, dev);
+ return 0;
+
+out_alloc_disk:
+ blk_cleanup_queue(dev->queue);
+ dev->queue = NULL;
+out_alloc_queue:
+ simc_close(dev->fd);
+ return -EIO;
+}
+
+static int __init simdisk_init(void)
+{
+ int i;
+
+ if (register_blkdev(simdisk_major, "simdisk") < 0) {
+ pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
+ return -EIO;
+ }
+ pr_info("SIMDISK: major: %d\n", simdisk_major);
+
+ if (n_files > simdisk_count)
+ simdisk_count = n_files;
+ if (simdisk_count > MAX_SIMDISK_COUNT)
+ simdisk_count = MAX_SIMDISK_COUNT;
+
+ sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
+ if (sddev == NULL)
+ goto out_unregister;
+
+ simdisk_procdir = proc_mkdir("simdisk", 0);
+ if (simdisk_procdir == NULL)
+ goto out_free_unregister;
+
+ for (i = 0; i < simdisk_count; ++i) {
+ if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
+ if (filename[i] != NULL && filename[i][0] != 0 &&
+ (n_files == 0 || i < n_files))
+ simdisk_attach(sddev + i, filename[i]);
+ }
+ }
+
+ return 0;
+
+out_free_unregister:
+ kfree(sddev);
+out_unregister:
+ unregister_blkdev(simdisk_major, "simdisk");
+ return -ENOMEM;
+}
+module_init(simdisk_init);
+
+static void simdisk_teardown(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ simdisk_detach(dev);
+ if (dev->gd)
+ del_gendisk(dev->gd);
+ if (dev->queue)
+ blk_cleanup_queue(dev->queue);
+ remove_proc_entry(tmp, procdir);
+}
+
+static void __exit simdisk_exit(void)
+{
+ int i;
+
+ for (i = 0; i < simdisk_count; ++i)
+ simdisk_teardown(sddev + i, i, simdisk_procdir);
+ remove_proc_entry("simdisk", 0);
+ kfree(sddev);
+ unregister_blkdev(simdisk_major, "simdisk");
+}
+module_exit(simdisk_exit);
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/xtensa/platforms/xt2000/Makefile b/arch/xtensa/platforms/xt2000/Makefile
new file mode 100644
index 000000000..54d018e45
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tensilica XT2000 Emulation Board
+#
+
+obj-y = setup.o
diff --git a/arch/xtensa/platforms/xt2000/include/platform/hardware.h b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
new file mode 100644
index 000000000..8e5e0d6a8
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
@@ -0,0 +1,43 @@
+/*
+ * platform/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+/*
+ * This file contains the hardware configuration of the XT2000 board.
+ */
+
+#ifndef _XTENSA_XT2000_HARDWARE_H
+#define _XTENSA_XT2000_HARDWARE_H
+
+#include <variant/core.h>
+
+/*
+ * On-board components.
+ */
+
+#define SONIC83934_INTNUM XCHAL_EXTINT3_NUM
+#define SONIC83934_ADDR IOADDR(0x0d030000)
+
+/*
+ * V3-PCI
+ */
+
+/* The XT2000 uses the V3 as a cascaded interrupt controller for the PCI bus */
+
+#define IRQ_PCI_A (XCHAL_NUM_INTERRUPTS + 0)
+#define IRQ_PCI_B (XCHAL_NUM_INTERRUPTS + 1)
+#define IRQ_PCI_C (XCHAL_NUM_INTERRUPTS + 2)
+
+/*
+ * Various other components.
+ */
+
+#define XT2000_LED_ADDR IOADDR(0x0d040000)
+
+#endif /* _XTENSA_XT2000_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xt2000/include/platform/serial.h b/arch/xtensa/platforms/xt2000/include/platform/serial.h
new file mode 100644
index 000000000..7226cf732
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/include/platform/serial.h
@@ -0,0 +1,28 @@
+/*
+ * platform/serial.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_XT2000_SERIAL_H
+#define _XTENSA_XT2000_SERIAL_H
+
+#include <variant/core.h>
+#include <asm/io.h>
+
+/* National-Semi PC16552D DUART: */
+
+#define DUART16552_1_INTNUM XCHAL_EXTINT4_NUM
+#define DUART16552_2_INTNUM XCHAL_EXTINT5_NUM
+
+#define DUART16552_1_ADDR IOADDR(0x0d050020) /* channel 1 */
+#define DUART16552_2_ADDR IOADDR(0x0d050000) /* channel 2 */
+
+#define DUART16552_XTAL_FREQ 18432000 /* crystal frequency in Hz */
+#define BASE_BAUD ( DUART16552_XTAL_FREQ / 16 )
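+
+/* With the standard 16x oversampling divisor of a 16550-class UART,
+ * this works out to 18432000 / 16 = 1152000 (1.152 Mbaud maximum). */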
+
+#endif /* _XTENSA_XT2000_SERIAL_H */
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
new file mode 100644
index 000000000..9c2f1fb96
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -0,0 +1,155 @@
+/*
+ * arch/xtensa/platforms/xt2000/setup.c
+ *
+ * Platform specific functions for the XT2000 board.
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2004 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/hardware.h>
+#include <platform/serial.h>
+
+/* Writes up to 8 characters of s to the LED display, starting at digit f;
+ * stops early at the terminating NUL. No NULL-pointer checking. */
+
+static void led_print(int f, char *s)
+{
+ unsigned long *led_addr = (unsigned long *)(XT2000_LED_ADDR + 0xE0) + f;
+ int i;
+
+ for (i = f; i < 8; i++)
+ if ((*led_addr++ = *s++) == 0)
+ break;
+}
+
+void platform_halt(void)
+{
+ led_print (0, " HALT ");
+ local_irq_disable();
+ while (1);
+}
+
+void platform_power_off(void)
+{
+ led_print (0, "POWEROFF");
+ local_irq_disable();
+ while (1);
+}
+
+void platform_restart(void)
+{
+ /* Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector. */
+ cpu_reset();
+ /* control never gets here */
+}
+
+void __init platform_setup(char** cmdline)
+{
+ led_print (0, "LINUX ");
+}
+
+/* early initialization */
+
+void __init platform_init(bp_tag_t *first)
+{
+}
+
+/* Heartbeat. Let the LED blink. */
+
+void platform_heartbeat(void)
+{
+ static int i = 0, t = 0;
+
+ if (--t < 0) {
+ t = 59;
+ led_print(7, i ? "." : " ");
+ i ^= 1;
+ }
+}
+
+#define _SERIAL_PORT(_base,_irq) \
+{ \
+ .mapbase = (_base), \
+ .membase = (void *)(_base), \
+ .irq = (_irq), \
+ .uartclk = DUART16552_XTAL_FREQ, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ .regshift = 2, \
+}
+
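+/*
+ * On big-endian configurations the byte-wide UART registers appear in
+ * the high byte lane of each 32-bit word, hence the +3 offset below
+ * (inferred from the XCHAL_HAVE_BE conditional; the file itself does
+ * not explain it).
+ */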
+static struct plat_serial8250_port xt2000_serial_data[] = {
+#if XCHAL_HAVE_BE
+ _SERIAL_PORT(DUART16552_1_ADDR + 3, DUART16552_1_INTNUM),
+ _SERIAL_PORT(DUART16552_2_ADDR + 3, DUART16552_2_INTNUM),
+#else
+ _SERIAL_PORT(DUART16552_1_ADDR, DUART16552_1_INTNUM),
+ _SERIAL_PORT(DUART16552_2_ADDR, DUART16552_2_INTNUM),
+#endif
+ { }
+};
+
+static struct platform_device xt2000_serial8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = xt2000_serial_data,
+ },
+};
+
+static struct resource xt2000_sonic_res[] = {
+ {
+ .start = SONIC83934_ADDR,
+ .end = SONIC83934_ADDR + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = SONIC83934_INTNUM,
+ .end = SONIC83934_INTNUM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device xt2000_sonic_device = {
+ .name = "xtsonic",
+ .num_resources = ARRAY_SIZE(xt2000_sonic_res),
+ .resource = xt2000_sonic_res,
+};
+
+static int __init xt2000_setup_devinit(void)
+{
+ platform_device_register(&xt2000_serial8250_device);
+ platform_device_register(&xt2000_sonic_device);
+
+ return 0;
+}
+
+device_initcall(xt2000_setup_devinit);
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
new file mode 100644
index 000000000..7839d38b2
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -0,0 +1,10 @@
+# Makefile for the Tensilica xtavnet Emulation Board
+#
+
+obj-y += setup.o
+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
new file mode 100644
index 000000000..30d9cb6cf
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -0,0 +1,60 @@
+/*
+ * arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+/*
+ * This file contains the hardware configuration of the XTAVNET boards.
+ */
+
+#include <asm/types.h>
+
+#ifndef __XTENSA_XTAVNET_HARDWARE_H
+#define __XTENSA_XTAVNET_HARDWARE_H
+
+/* Default assignment of LX60 devices to external interrupts. */
+
+#ifdef CONFIG_XTENSA_MX
+#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
+#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
+#else
+#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
+#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
+#endif
+
+/*
+ * Device addresses and parameters.
+ */
+
+/* UART */
+#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+
+/* Misc. */
+#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+/* Clock frequency in Hz (read-only): */
+#define XTFPGA_CLKFRQ_VADDR (XTFPGA_FPGAREGS_VADDR + 0x04)
+/* Setting of 8 DIP switches: */
+#define DIP_SWITCHES_VADDR (XTFPGA_FPGAREGS_VADDR + 0x0C)
+/* Software reset (write 0xdead): */
+#define XTFPGA_SWRST_VADDR (XTFPGA_FPGAREGS_VADDR + 0x10)
+
+/* OpenCores Ethernet controller: */
+ /* regs + RX/TX descriptors */
+#define OETH_REGS_PADDR (XCHAL_KIO_PADDR + 0x0D030000)
+#define OETH_REGS_SIZE 0x1000
+#define OETH_SRAMBUFF_PADDR (XCHAL_KIO_PADDR + 0x0D800000)
+
+ /* 5*rx buffs + 5*tx buffs */
+#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
+
+#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
+#define C67X00_SIZE 0x10
+
+#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
new file mode 100644
index 000000000..4c8541ed1
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -0,0 +1,35 @@
+/*
+ * arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __XTENSA_XTAVNET_LCD_H
+#define __XTENSA_XTAVNET_LCD_H
+
+#ifdef CONFIG_XTFPGA_LCD
+/* Display string STR at position POS on the LCD. */
+void lcd_disp_at_pos(char *str, unsigned char pos);
+
+/* Shift the contents of the LCD display left or right. */
+void lcd_shiftleft(void);
+void lcd_shiftright(void);
+#else
+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+}
+
+static inline void lcd_shiftleft(void)
+{
+}
+
+static inline void lcd_shiftright(void)
+{
+}
+#endif
+
+#endif
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/serial.h b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
new file mode 100644
index 000000000..14d8f7bee
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
@@ -0,0 +1,18 @@
+/*
+ * arch/xtensa/platforms/xtfpga/include/platform/serial.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_XTAVNET_SERIAL_H
+#define __ASM_XTENSA_XTAVNET_SERIAL_H
+
+#include <platform/hardware.h>
+
+#define BASE_BAUD (*(long *)XTFPGA_CLKFRQ_VADDR / 16)
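+
+/* The actual clock rate of the FPGA bitstream is read at run time from
+ * the CLKFRQ register; /16 is the usual 16550 base-baud divisor. */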
+
+#endif /* __ASM_XTENSA_XTAVNET_SERIAL_H */
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
new file mode 100644
index 000000000..2f7eb66c2
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -0,0 +1,89 @@
+/*
+ * Driver for the LCD display on the Tensilica XTFPGA board family.
+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <platform/hardware.h>
+#include <platform/lcd.h>
+
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
+
+#define LCD_CLEAR 0x1
+#define LCD_DISPLAY_ON 0xc
+
+/* 8-bit bus, 2-line display */
+#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_MODE4BIT 0x28
+#define LCD_DISPLAY_POS 0x80
+#define LCD_SHIFT_LEFT 0x18
+#define LCD_SHIFT_RIGHT 0x1c
+
+static void lcd_put_byte(u8 *addr, u8 data)
+{
+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ WRITE_ONCE(*addr, data);
+#else
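+ /* 4-bit bus: the byte goes out as two nibbles on the same data
+ * lines, high nibble first, then the low nibble shifted up. */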
+ WRITE_ONCE(*addr, data & 0xf0);
+ WRITE_ONCE(*addr, (data << 4) & 0xf0);
+#endif
+}
+
+static int __init lcd_init(void)
+{
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+ mdelay(5);
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+ udelay(200);
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+ udelay(50);
+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+#endif
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ udelay(50);
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+}
+
+void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ udelay(100);
+ while (*str != 0) {
+ lcd_put_byte(LCD_DATA_ADDR, *str);
+ udelay(200);
+ str++;
+ }
+}
+
+void lcd_shiftleft(void)
+{
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ udelay(50);
+}
+
+void lcd_shiftright(void)
+{
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ udelay(50);
+}
+
+arch_initcall(lcd_init);
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
new file mode 100644
index 000000000..db5122765
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -0,0 +1,310 @@
+/*
+ *
+ * arch/xtensa/platforms/xtfpga/setup.c
+ *
+ * ...
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2006 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/lcd.h>
+#include <platform/hardware.h>
+
+void platform_halt(void)
+{
+ lcd_disp_at_pos(" HALT ", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_power_off(void)
+{
+ lcd_disp_at_pos("POWEROFF", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_restart(void)
+{
+ /* Try software reset first. */
+ WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+ /* If software reset did not work, flush and reset the mmu,
+ * simulate a processor reset, and jump to the reset vector.
+ */
+ cpu_reset();
+ /* control never gets here */
+}
+
+void __init platform_setup(char **cmdline)
+{
+}
+
+/* early initialization */
+
+void __init platform_init(bp_tag_t *first)
+{
+}
+
+/* Heartbeat. */
+
+void platform_heartbeat(void)
+{
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+
+void __init platform_calibrate_ccount(void)
+{
+ ccount_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
+}
+
+#endif
+
+#ifdef CONFIG_USE_OF
+
+static void __init xtfpga_clk_setup(struct device_node *np)
+{
+ void __iomem *base = of_iomap(np, 0);
+ struct clk *clk;
+ u32 freq;
+
+ if (!base) {
+ pr_err("%s: invalid address\n", np->name);
+ return;
+ }
+
+ freq = __raw_readl(base);
+ iounmap(base);
+ clk = clk_register_fixed_rate(NULL, np->name, NULL, 0, freq);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: clk registration failed\n", np->name);
+ return;
+ }
+
+ if (of_clk_add_provider(np, of_clk_src_simple_get, clk)) {
+ pr_err("%s: clk provider registration failed\n", np->name);
+ return;
+ }
+}
+CLK_OF_DECLARE(xtfpga_clk, "cdns,xtfpga-clock", xtfpga_clk_setup);
+
+#define MAC_LEN 6
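+
+/*
+ * Patch the FDT's local-mac-address in place: the low six bits of the
+ * DIP switches select the last MAC byte, mirroring the hwaddr scheme
+ * of the non-OF ethoc_pdata further below.
+ */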
+static void __init update_local_mac(struct device_node *node)
+{
+ struct property *newmac;
+ const u8 *macaddr;
+ int prop_len;
+
+ macaddr = of_get_property(node, "local-mac-address", &prop_len);
+ if (macaddr == NULL || prop_len != MAC_LEN)
+ return;
+
+ newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL);
+ if (newmac == NULL)
+ return;
+
+ newmac->value = newmac + 1;
+ newmac->length = MAC_LEN;
+ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (newmac->name == NULL) {
+ kfree(newmac);
+ return;
+ }
+
+ memcpy(newmac->value, macaddr, MAC_LEN);
+ ((u8 *)newmac->value)[5] = (*(u32 *)DIP_SWITCHES_VADDR) & 0x3f;
+ of_update_property(node, newmac);
+}
+
+static int __init machine_setup(void)
+{
+ struct device_node *eth = NULL;
+
+ if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
+ update_local_mac(eth);
+ return 0;
+}
+arch_initcall(machine_setup);
+
+#else
+
+#include <linux/serial_8250.h>
+#include <linux/if.h>
+#include <net/ethoc.h>
+#include <linux/usb/c67x00.h>
+
+/*----------------------------------------------------------------------------
+ * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
+ */
+
+static struct resource ethoc_res[] = {
+ [0] = { /* register space */
+ .start = OETH_REGS_PADDR,
+ .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* buffer space */
+ .start = OETH_SRAMBUFF_PADDR,
+ .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = { /* IRQ number */
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct ethoc_platform_data ethoc_pdata = {
+ /*
+ * The MAC address for these boards is 00:50:c2:13:6f:xx.
+ * The last byte (here as zero) is read from the DIP switches on the
+ * board.
+ */
+ .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 },
+ .phy_id = -1,
+ .big_endian = XCHAL_HAVE_BE,
+};
+
+static struct platform_device ethoc_device = {
+ .name = "ethoc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ethoc_res),
+ .resource = ethoc_res,
+ .dev = {
+ .platform_data = &ethoc_pdata,
+ },
+};
+
+/*----------------------------------------------------------------------------
+ * USB Host/Device -- Cypress CY7C67300
+ */
+
+static struct resource c67x00_res[] = {
+ [0] = { /* register space */
+ .start = C67X00_PADDR,
+ .end = C67X00_PADDR + C67X00_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* IRQ number */
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct c67x00_platform_data c67x00_pdata = {
+ .sie_config = C67X00_SIE1_HOST | C67X00_SIE2_UNUSED,
+ .hpi_regstep = 4,
+};
+
+static struct platform_device c67x00_device = {
+ .name = "c67x00",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(c67x00_res),
+ .resource = c67x00_res,
+ .dev = {
+ .platform_data = &c67x00_pdata,
+ },
+};
+
+/*----------------------------------------------------------------------------
+ * UART
+ */
+
+static struct resource serial_resource = {
+ .start = DUART16552_PADDR,
+ .end = DUART16552_PADDR + 0x1f,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ [0] = {
+ .mapbase = DUART16552_PADDR,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+ UPF_IOREMAP,
+ .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
+ .regshift = 2,
+ .uartclk = 0, /* set in xtavnet_init() */
+ },
+ { },
+};
+
+static struct platform_device xtavnet_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = 1,
+ .resource = &serial_resource,
+};
+
+/* platform devices */
+static struct platform_device *platform_devices[] __initdata = {
+ &ethoc_device,
+ &c67x00_device,
+ &xtavnet_uart,
+};
+
+
+static int __init xtavnet_init(void)
+{
+ /* Ethernet MAC address. */
+ ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR;
+
+ /* The clock rate varies among FPGA bitstreams; a board-specific FPGA
+ * register reports the actual rate.
+ */
+ serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ /* register platform devices */
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+ /* The ETHOC driver is fairly quiet; display the Ethernet MAC here so
+ * the user can tell whether the DIP switches are set correctly.
+ */
+ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+ ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ return 0;
+}
+
+/*
+ * Register to be done during do_initcalls().
+ */
+arch_initcall(xtavnet_init);
+
+#endif /* CONFIG_USE_OF */
diff --git a/arch/xtensa/variants/csp/include/variant/core.h b/arch/xtensa/variants/csp/include/variant/core.h
new file mode 100644
index 000000000..ccd81f07e
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/core.h
@@ -0,0 +1,575 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 4 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 16 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "xt_lnx" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00057D54 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C857D54 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 64 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 64 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 6 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 6 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 65536 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 6
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
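+
+/* Worked example of the size formula above:
+ * I-cache: 4 ways * 2^(6 + 8) = 65536 bytes (XCHAL_ICACHE_SIZE);
+ * D-cache: 4 ways * 2^(6 + 6) = 16384 bytes (XCHAL_DCACHE_SIZE). */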
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 16
+#define XCHAL_DCACHE_ACCESS_SIZE 16
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00001140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00008000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F11FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F1FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 2
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F1000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000033F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL4_NUM 15
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 2) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 1 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/csp/include/variant/tie-asm.h b/arch/xtensa/variants/csp/include/variant/tie-asm.h
new file mode 100644
index 000000000..ba773c4a2
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/tie-asm.h
@@ -0,0 +1,194 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
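+/* For example, selecting just the optional state that the compiler uses
+   automatically and that is global across calls (on this core, only
+   THREADPTR) would be:
+	XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_GLOB)
+		== 0x0002 | 0x0008 | 0x0040 == 0x004A */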
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ *		(register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ rsr.BR \at1 // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_store
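+	/* For example, with a save-area pointer in a2 and a3..a6 as scratch,
+	   the following invocation would store only the MAC16 accumulator
+	   (the optional, caller-saved state the compiler uses automatically):
+		xchal_ncp_store	a2, a3, a4, a5, a6, select=(XTHAL_SAS_OPT|XTHAL_SAS_CC|XTHAL_SAS_CALR)
+	 */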
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ *		(register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.BR \at1 // boolean option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_load
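+	/* For example, restoring what the matching xchal_ncp_store call above
+	   saved (identical select/alloc values keep the computed offsets
+	   identical):
+		xchal_ncp_load	a2, a3, a4, a5, a6, select=(XTHAL_SAS_OPT|XTHAL_SAS_CC|XTHAL_SAS_CALR)
+	 */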
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/csp/include/variant/tie.h b/arch/xtensa/variants/csp/include/variant/tie.h
new file mode 100644
index 000000000..3ce391cfe
--- /dev/null
+++ b/arch/xtensa/variants/csp/include/variant/tie.h
@@ -0,0 +1,161 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
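+/* I.e. XCHAL_CP_MASK == (1 << XCHAL_CP_ID_XTIOP); the only configured
+   coprocessor on this core is XTIOP, with coprocessor ID 7. */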
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 36
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 48 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 9
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
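+/* For example, summing the asize field over the list reproduces
+   XCHAL_NCP_SA_SIZE:
+
+	#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,x...) \
+		+ (asize)
+	enum { ncp_sa_bytes = 0 XCHAL_NCP_SA_LIST(0) };	// 9 * 4 == 36
+*/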
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
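+/* The table above is the 16-entry XCHAL_OP0_FORMAT_LENGTHS row repeated 16
+   times: on this little-endian core only the low nibble of the first byte
+   (the op0 field) selects the format, so bytes with op0 = 8..13 start
+   2-byte (density) instructions and all others start 3-byte instructions. */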
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
new file mode 100644
index 000000000..525bd3d90
--- /dev/null
+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
@@ -0,0 +1,424 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 701001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc232b" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)"
+#define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 221001 /* major*100+minor */
+#define XCHAL_HW_REL_LX2 1
+#define XCHAL_HW_REL_LX2_1 1
+#define XCHAL_HW_REL_LX2_1_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
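+/* For example, for the I-cache above: 4 ways * 2^(5 + 7) == 16384 bytes,
+   matching XCHAL_ICACHE_SIZE. */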
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
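+/* Each ANDBELOW mask is the OR of the level masks up to that level, e.g.
+   XCHAL_INTLEVEL2_ANDBELOW_MASK == 0x001F80FF | 0x00000100 == 0x001F81FF. */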
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
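+/* For example, XCHAL_INTTYPE_MASK_TIMER == 0x00002440 sets bits 6, 10 and 13,
+   matching the CCOMPARE0..CCOMPARE2 interrupt numbers assigned below. */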
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (Levels 1 and 3 each have more than one interrupt configured.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
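+/* For the relocatable vectors below, each VADDR is the VECBASE reset value
+   plus the corresponding VECOFS, e.g. 0xD0000000 + 0x00000340 == 0xD0000340
+   for the user vector. */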
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0xD00002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the last four options above is set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/dc232b/include/variant/tie-asm.h b/arch/xtensa/variants/dc232b/include/variant/tie-asm.h
new file mode 100644
index 000000000..ed4f53f52
--- /dev/null
+++ b/arch/xtensa/variants/dc232b/include/variant/tie-asm.h
@@ -0,0 +1,122 @@
+/*
+ * This header file contains assembly-language definitions (assembly
+ * macros, etc.) for this specific Xtensa processor's TIE extensions
+ * and options. It is customized to this Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+
+
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (1-byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-8, 4, 4
+ rsr \at1, ACCLO // MAC16 accumulator
+ rsr \at2, ACCHI
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ s32i \at2, \ptr, .Lxchal_ofs_ + 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-16, 4, 4
+ rsr \at1, M0 // MAC16 registers
+ rsr \at2, M1
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ s32i \at2, \ptr, .Lxchal_ofs_ + 4
+ rsr \at1, M2
+ rsr \at2, M3
+ s32i \at1, \ptr, .Lxchal_ofs_ + 8
+ s32i \at2, \ptr, .Lxchal_ofs_ + 12
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 16
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rsr \at1, SCOMPARE1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rur \at1, THREADPTR // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_store
+
+/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (1-byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-8, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ l32i \at2, \ptr, .Lxchal_ofs_ + 4
+ wsr \at1, ACCLO // MAC16 accumulator
+ wsr \at2, ACCHI
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-16, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ l32i \at2, \ptr, .Lxchal_ofs_ + 4
+ wsr \at1, M0 // MAC16 registers
+ wsr \at2, M1
+ l32i \at1, \ptr, .Lxchal_ofs_ + 8
+ l32i \at2, \ptr, .Lxchal_ofs_ + 12
+ wsr \at1, M2
+ wsr \at2, M3
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 16
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wsr \at1, SCOMPARE1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wur \at1, THREADPTR // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_load
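+/* For example, a context-switch path could save and later restore all of
+ * this state with matching calls (a2 = save-area pointer; only the first
+ * XCHAL_NCP_NUM_ATMPS == 2 scratch registers are actually clobbered):
+ *	xchal_ncp_store	a2, a3, a4, a5, a6
+ *	...
+ *	xchal_ncp_load	a2, a3, a4, a5, a6
+ */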
+
+
+
+#define XCHAL_NCP_NUM_ATMPS 2
+
+
+#define XCHAL_SA_NUM_ATMPS 2
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/dc232b/include/variant/tie.h b/arch/xtensa/variants/dc232b/include/variant/tie.h
new file mode 100644
index 000000000..018e81af4
--- /dev/null
+++ b/arch/xtensa/variants/dc232b/include/variant/tie.h
@@ -0,0 +1,131 @@
+/*
+ * This header file describes this specific Xtensa processor's TIE extensions
+ * that extend basic Xtensa core functionality. It is customized to this
+ * Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 32
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 8
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/core.h b/arch/xtensa/variants/dc233c/include/variant/core.h
new file mode 100644
index 000000000..3a2e53b94
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/core.h
@@ -0,0 +1,475 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
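The type masks partition the 22 configured interrupts: bit n is set in exactly one mask. For instance, the timer mask covers exactly the three CCOMPARE interrupts (6, 10, 13) assigned just below. A short C sketch (the helper name is illustrative, not a kernel API):

/* Sketch: the timer-type mask is exactly interrupts 6, 10 and 13. */
_Static_assert(XCHAL_INTTYPE_MASK_TIMER ==
               ((1u << 6) | (1u << 10) | (1u << 13)), "timer mask");

/* Hypothetical helper: is interrupt n a timer source? */
static inline int xt_int_is_timer(unsigned n)
{
        return (XCHAL_INTTYPE_MASK_TIMER >> n) & 1u;
}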
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt mapping.

+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
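With relocatable vectors (VECBASE), each vector's reset-time address is just the VECBASE reset value plus that vector's offset. A standalone C11 check of the values above (not part of the header):

/* Sketch: vector VADDR = VECBASE reset value + per-vector offset. */
_Static_assert(XCHAL_USER_VECTOR_VADDR ==
               XCHAL_VECBASE_RESET_VADDR + XCHAL_USER_VECOFS,
               "user vector");
_Static_assert(XCHAL_KERNEL_VECTOR_VADDR ==
               XCHAL_VECBASE_RESET_VADDR + XCHAL_KERNEL_VECOFS,
               "kernel vector");
_Static_assert(XCHAL_DOUBLEEXC_VECTOR_VADDR ==
               XCHAL_VECBASE_RESET_VADDR + XCHAL_DOUBLEEXC_VECOFS,
               "double-exception vector");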
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the last four options above is set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie-asm.h b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
new file mode 100644
index 000000000..5dbd981ea
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
@@ -0,0 +1,193 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
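XTHAL_SAS3 masks each argument into its own bit field and ORs the results, so selector values compose by simple OR. A worked example as a standalone check (not part of the header):

/* Optional regs, compiler-used, thread-global: 0x0002|0x0008|0x0040. */
_Static_assert(XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_GLOB)
               == 0x004A, "selector composition");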
+
+
+
+ /*
+ * Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr \at1, ACCLO // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, ACCHI // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ rsr \at1, M0 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, M1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr \at1, M2 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr \at1, M3 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr \at1, SCOMPARE1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_store
+
+ /*
+ * Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, ACCLO // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, ACCHI // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, M0 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, M1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr \at1, M2 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr \at1, M3 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr \at1, SCOMPARE1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie.h b/arch/xtensa/variants/dc233c/include/variant/tie.h
new file mode 100644
index 000000000..815e52bc3
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie.h
@@ -0,0 +1,150 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 32
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
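For orientation, the 32-byte NCP save area that xchal_ncp_store in this variant's tie-asm.h fills can be pictured as the following hypothetical C struct (a sketch, not a kernel type; the field order matches the store sequence: threadptr, then ACCLO/ACCHI, then M0..M3 and SCOMPARE1):

/* Hypothetical C view of the dc233c NCP save area: 8 words = 32 bytes,
   matching XCHAL_NCP_SA_SIZE, aligned per XCHAL_NCP_SA_ALIGN. */
struct dc233c_ncp_save_area {
        unsigned int threadptr;         /* thread-global, compiler-used */
        unsigned int acclo, acchi;      /* MAC16 accumulator (caller-saved) */
        unsigned int m0, m1, m2, m3;    /* MAC16 data registers */
        unsigned int scompare1;         /* conditional-store option */
} __attribute__((aligned(XCHAL_NCP_SA_ALIGN)));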
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 8
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0)
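Building on the filtering example in the comment above, a caller can also expand the list into data. The sketch below (hypothetical table name; C99 variadic macros ignore the trailing fields) turns XCHAL_NCP_SA_LIST into a name/size table:

/* Sketch: expand the save-area list into a register name/size table. */
#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,...) \
        { #name, asize },
static const struct { const char *name; int size; } ncp_regs[] = {
        XCHAL_NCP_SA_LIST(0)
};
#undef XCHAL_SA_REG
/* ncp_regs[0] is { "threadptr", 4 }; there are XCHAL_NCP_SA_NUM entries. */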
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
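These sixteen entries are indexed by the op0 field of an instruction's first byte; on the little-endian configurations these variants use, op0 is the low nibble. A decoder might look like this sketch (helper names are illustrative):

/* Sketch: instruction length in bytes from the first instruction byte. */
static const unsigned char xt_op0_length[16] = { XCHAL_OP0_FORMAT_LENGTHS };

static inline unsigned xt_insn_length(unsigned char first_byte)
{
        return xt_op0_length[first_byte & 0x0F]; /* op0 = low nibble (LE) */
}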
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/de212/include/variant/core.h b/arch/xtensa/variants/de212/include/variant/core.h
new file mode 100644
index 000000000..59e91e47e
--- /dev/null
+++ b/arch/xtensa/variants/de212/include/variant/core.h
@@ -0,0 +1,594 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "de212" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x0005A985 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC283DFFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C85A985 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
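Plugging these into the formula above: each cache is 2 ways of 2^(5+7) = 4096 bytes, i.e. 8192 bytes, matching the XCHAL_ICACHE_SIZE/XCHAL_DCACHE_SIZE values. As a standalone C11 check (not part of the header):

/* Sketch: ways * 2^(linewidth + setwidth) must equal the cache size. */
_Static_assert(XCHAL_ICACHE_WAYS *
               (1 << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH))
               == XCHAL_ICACHE_SIZE, "I-cache geometry");
_Static_assert(XCHAL_DCACHE_WAYS *
               (1 << (XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH))
               == XCHAL_DCACHE_SIZE, "D-cache geometry");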
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 1 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */
+
+/* Instruction RAM 0: */
+#define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */
+#define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */
+#define XCHAL_INSTRAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+
+/* Data RAM 0: */
+#define XCHAL_DATARAM0_VADDR 0x3FFE0000 /* virtual address */
+#define XCHAL_DATARAM0_PADDR 0x3FFE0000 /* physical address */
+#define XCHAL_DATARAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_DATARAM0_BANKS 1 /* number of banks */
+
+/* XLMI Port 0: */
+#define XCHAL_XLMI0_VADDR 0x3FFC0000 /* virtual address */
+#define XCHAL_XLMI0_PADDR 0x3FFC0000 /* physical address */
+#define XCHAL_XLMI0_SIZE 131072 /* size in bytes */
+#define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */
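Since these local memories are identity-mapped (VADDR == PADDR), an address-range test is a plain bounds check; a sketch with an illustrative helper name:

/* Sketch: does a physical address fall inside data RAM 0? */
static inline int xt_in_dataram0(unsigned long pa)
{
        return pa >= XCHAL_DATARAM0_PADDR &&
               pa < XCHAL_DATARAM0_PADDR + XCHAL_DATARAM0_SIZE;
}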
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */
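The two tables are inverses of each other: XCHAL_EXTINTk_NUM maps BInterrupt pin k to a core interrupt number, and XCHAL_INTn_EXTNUM maps back. A dispatcher can fold the forward mapping into a lookup table, e.g. (illustrative name):

/* Sketch: BInterrupt pin number -> core interrupt number. */
static const unsigned char xt_extint_to_core[XCHAL_NUM_EXTINTERRUPTS] = {
        XCHAL_EXTINT0_NUM,  XCHAL_EXTINT1_NUM,  XCHAL_EXTINT2_NUM,
        XCHAL_EXTINT3_NUM,  XCHAL_EXTINT4_NUM,  XCHAL_EXTINT5_NUM,
        XCHAL_EXTINT6_NUM,  XCHAL_EXTINT7_NUM,  XCHAL_EXTINT8_NUM,
        XCHAL_EXTINT9_NUM,  XCHAL_EXTINT10_NUM, XCHAL_EXTINT11_NUM,
        XCHAL_EXTINT12_NUM, XCHAL_EXTINT13_NUM, XCHAL_EXTINT14_NUM,
        XCHAL_EXTINT15_NUM, XCHAL_EXTINT16_NUM,
};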
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x60000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x60000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR0_PADDR 0x50000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x40000400
+#define XCHAL_RESET_VECTOR1_PADDR 0x40000400
+#define XCHAL_RESET_VECTOR_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR_PADDR 0x50000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x60000340
+#define XCHAL_USER_VECTOR_PADDR 0x60000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x60000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x60000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x600003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x600003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x60000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x60000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x60000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x60000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x600001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x600001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x60000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x60000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x60000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x60000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x60000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x60000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x600002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x600002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the last four options above is set, it's a custom TLB configuration. */
+
+#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/de212/include/variant/tie-asm.h b/arch/xtensa/variants/de212/include/variant/tie-asm.h
new file mode 100644
index 000000000..77755354f
--- /dev/null
+++ b/arch/xtensa/variants/de212/include/variant/tie-asm.h
@@ -0,0 +1,170 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_store
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/de212/include/variant/tie.h b/arch/xtensa/variants/de212/include/variant/tie.h
new file mode 100644
index 000000000..b8a061a3f
--- /dev/null
+++ b/arch/xtensa/variants/de212/include/variant/tie.h
@@ -0,0 +1,136 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 0 /* number of coprocessors */
+#define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 28
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
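Two hypothetical C11 sanity checks (not in the patch) tying these numbers together: the NCP list further below has seven 4-byte entries, and rounding 28 up by the 16-byte padding mentioned in the comment gives the 32-byte total.

_Static_assert(7 * 4 == XCHAL_NCP_SA_SIZE, "seven 4-byte NCP entries");
_Static_assert(((XCHAL_NCP_SA_SIZE + 15) & ~15) == XCHAL_TOTAL_SA_SIZE,
	       "NCP size rounded to a 16-byte multiple");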
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number for debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 7
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
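One hypothetical expansion (not part of the patch), in the spirit of the filtering recipe in the comment above: stringize the name field of each entry to collect the seven register names. The de212 list passes exactly sixteen arguments per entry, so a fixed-arity XCHAL_SA_REG suffices here.

#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize, \
		     dbnum,base,regnum,bitsz,gapsz,reset,x) #name,

static const char *const de212_ncp_regs[] = {
	XCHAL_NCP_SA_LIST(0)	/* "acclo", "acchi", "scompare1", "m0".."m3" */
};
#undef XCHAL_SA_REG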
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
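A hypothetical decoder helper (not in the patch) built directly on the table above: since XCHAL_BYTE0_FORMAT_LENGTHS is already a 256-entry initializer indexed by an instruction's first byte, length lookup is a single array access.

static const unsigned char de212_insn_len[256] = {
	XCHAL_BYTE0_FORMAT_LENGTHS
};

static inline unsigned de212_insn_length(unsigned char byte0)
{
	return de212_insn_len[byte0];	/* 2 (density) or 3 (core) here */
}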
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
new file mode 100644
index 000000000..2f337605c
--- /dev/null
+++ b/arch/xtensa/variants/fsf/include/variant/core.h
@@ -0,0 +1,359 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_H
+#define _XTENSA_CORE_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc. */
+
+#define XCHAL_CORE_ID "fsf" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XTHAL_HW_REL_LX2 1
+#define XTHAL_HW_REL_LX2_0 1
+#define XTHAL_HW_REL_LX2_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
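Hypothetical C11 cross-checks (not in the patch) of the size formula quoted above, ways * 2^(linewidth + setwidth): here 2 * 2^(4 + 8) = 8192 bytes for each cache.

_Static_assert((XCHAL_ICACHE_WAYS << (XCHAL_ICACHE_LINEWIDTH +
		XCHAL_ICACHE_SETWIDTH)) == XCHAL_ICACHE_SIZE, "fsf I-cache");
_Static_assert((XCHAL_DCACHE_WAYS << (XCHAL_DCACHE_LINEWIDTH +
		XCHAL_DCACHE_SETWIDTH)) == XCHAL_DCACHE_SIZE, "fsf D-cache");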
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
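A hypothetical consistency check (not in the patch): each ANDBELOW mask is simply the OR of the single-level masks up to that level, e.g. 0x000064F9 | 0x00008902 == 0x0000EDFB for levels 1..2.

_Static_assert((XCHAL_INTLEVEL1_MASK | XCHAL_INTLEVEL2_MASK) ==
	       XCHAL_INTLEVEL2_ANDBELOW_MASK, "levels 1..2");
_Static_assert((XCHAL_INTLEVEL2_ANDBELOW_MASK | XCHAL_INTLEVEL3_MASK) ==
	       XCHAL_INTLEVEL3_ANDBELOW_MASK, "levels 1..3");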
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
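A hypothetical partition check (not in the patch): the four non-empty type masks together cover exactly the 17 configured interrupts, and XCHAL_INTTYPE_MASK_UNCONFIGURED (0xFFFE0000) is their complement within 32 bits.

_Static_assert((XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		XCHAL_INTTYPE_MASK_EXTERN_LEVEL | XCHAL_INTTYPE_MASK_TIMER)
	       == 0x0001FFFF, "every configured interrupt has one type");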
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See <xtensa/config/core-matmap.h> header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
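A small hypothetical check (not in the patch) relating the last two values: a 2-bit RING field encodes exactly the 4 rings, while the 8 ASID bits give 256 address-space IDs.

_Static_assert((1 << XCHAL_MMU_RING_BITS) == XCHAL_MMU_RINGS,
	       "RING field width matches ring count");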
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/fsf/include/variant/tie-asm.h b/arch/xtensa/variants/fsf/include/variant/tie-asm.h
new file mode 100644
index 000000000..68a73bf4f
--- /dev/null
+++ b/arch/xtensa/variants/fsf/include/variant/tie-asm.h
@@ -0,0 +1,70 @@
+/*
+ * This header file contains assembly-language definitions (assembly
+ * macros, etc.) for this specific Xtensa processor's TIE extensions
+ * and options. It is customized to this Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+
+
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (1 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rur \at1, THREADPTR // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_store
+
+/* Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (1 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wur \at1, THREADPTR // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_load
+
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
new file mode 100644
index 000000000..244cdea4d
--- /dev/null
+++ b/arch/xtensa/variants/fsf/include/variant/tie.h
@@ -0,0 +1,72 @@
+/*
+ * This header file describes this specific Xtensa processor's TIE extensions
+ * that extend basic Xtensa core functionality. It is customized to this
+ * Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 0 /* number of coprocessors */
+#define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_NCP_SA_SIZE 0
+#define XCHAL_NCP_SA_ALIGN 1
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_SIZE 0
+#define XCHAL_CP7_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 0
+#define XCHAL_NCP_SA_ALIGN 1
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 0 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 1 /* actual minimum alignment */
+
+#define XCHAL_NCP_SA_NUM 0
+#define XCHAL_NCP_SA_LIST(s)
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s)
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s)
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s)
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s)
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s)
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s)
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s)
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s)
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/core.h b/arch/xtensa/variants/test_kc705_be/include/variant/core.h
new file mode 100644
index 000000000..a4ebdf719
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/core.h
@@ -0,0 +1,575 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 1 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2_MUL32X24 1 /* HiFi2 and 32x24 MACs */
+#define XCHAL_HAVE_HIFI2EP 1 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc. */
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_kc705_be" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00058D8C /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C858D8C /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 1 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 1 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 8 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
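A worked evaluation (not part of the patch): on this core XCHAL_LOOP_BUFFER_SIZE, XCHAL_DCACHE_IS_COHERENT and both *_DYN_WAYS options are 0, so the left operand of the && folds to 0 and MEMCTL-programming code drops out at preprocessing time, whatever value XTENSA_HWVERSION_RE_2012_0 (defined in the HAL headers) turns out to have:

#if XCHAL_USE_MEMCTL
# error "not reached on this configuration -- MEMCTL carries nothing useful"
#endif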
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 4 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00000140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00008000
+#define XCHAL_INTLEVEL5_MASK 0x00003000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F01FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F0FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 5
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F0000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL4_NUM 15
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 2, 3, 5.) */
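A hypothetical tie-up (not in the patch) of the tables above: level 7's lone interrupt is the NMI and level 4's is the profiling interrupt.

_Static_assert(XCHAL_INTLEVEL7_NUM == XCHAL_NMI_INTERRUPT, "NMI owns level 7");
_Static_assert(XCHAL_INTLEVEL4_NUM == XCHAL_PROFILING_INTERRUPT,
	       "profiling owns level 4");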
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 5) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 5) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
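A hypothetical lookup table (not part of the patch) built from the macros above, mapping an external BInterrupt pin to its core interrupt number; e.g. pin 9 resolves to core interrupt 14, the level-7 NMI.

static const unsigned char extint_to_core[XCHAL_NUM_EXTINTERRUPTS] = {
	XCHAL_EXTINT0_NUM,  XCHAL_EXTINT1_NUM,  XCHAL_EXTINT2_NUM,
	XCHAL_EXTINT3_NUM,  XCHAL_EXTINT4_NUM,  XCHAL_EXTINT5_NUM,
	XCHAL_EXTINT6_NUM,  XCHAL_EXTINT7_NUM,  XCHAL_EXTINT8_NUM,
	XCHAL_EXTINT9_NUM,  XCHAL_EXTINT10_NUM, XCHAL_EXTINT11_NUM,
	XCHAL_EXTINT12_NUM, XCHAL_EXTINT13_NUM, XCHAL_EXTINT14_NUM,
	XCHAL_EXTINT15_NUM,
};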
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
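A hypothetical spot-check (not in the patch) of the relocatable-vector layout above: with VECBASE at its reset value, each vector's VADDR is VECBASE plus its VECOFS, e.g. 0x00002000 + 0x340 = 0x00002340.

_Static_assert(XCHAL_VECBASE_RESET_VADDR + XCHAL_USER_VECOFS ==
	       XCHAL_USER_VECTOR_VADDR, "user vector");
_Static_assert(XCHAL_VECBASE_RESET_VADDR + XCHAL_DOUBLEEXC_VECOFS ==
	       XCHAL_DOUBLEEXC_VECTOR_VADDR, "double-exception vector");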
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h b/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h
new file mode 100644
index 000000000..b87fe1a51
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h
@@ -0,0 +1,308 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
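+
+/*
+ * For example (illustrative only), a <select> bitmask covering just the
+ * optional, compiler-used, caller-saved state can be built as
+ *	XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_CALR)
+ * which expands to 0x0002 | 0x0008 | 0x0010 == 0x001A.
+ */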
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If the macro is invoked as part of a larger store sequence, set
+ * to 1 if this is not the first macro in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ rsr.BR \at1 // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_store
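+
+ // A hypothetical invocation (not part of the generated sources): with
+ // a2 holding a 4-byte-aligned save-area pointer and a3..a6 as scratch,
+ //	xchal_ncp_store a2, a3, a4, a5, a6
+ // stores all default NCP contents.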
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If the macro is invoked as part of a larger load sequence, set
+ * to 1 if this is not the first macro in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.BR \at1 // boolean option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+ /*
+ * Macro to store the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain an 8-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_store.
+ */
+#define xchal_cp_AudioEngineLX_store xchal_cp1_store
+ .macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ rur.AE_OVF_SAR \at1 // ureg 240
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rur.AE_BITHEAD \at1 // ureg 241
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rur.AE_SD_NO \at1 // ureg 243
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rur.AE_CBEGIN0 \at1 // ureg 246
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rur.AE_CEND0 \at1 // ureg 247
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ ae_sp24x2s.i aep0, \ptr, .Lxchal_ofs_+24
+ ae_sp24x2s.i aep1, \ptr, .Lxchal_ofs_+32
+ ae_sp24x2s.i aep2, \ptr, .Lxchal_ofs_+40
+ ae_sp24x2s.i aep3, \ptr, .Lxchal_ofs_+48
+ ae_sp24x2s.i aep4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ ae_sp24x2s.i aep5, \ptr, .Lxchal_ofs_+0
+ ae_sp24x2s.i aep6, \ptr, .Lxchal_ofs_+8
+ ae_sp24x2s.i aep7, \ptr, .Lxchal_ofs_+16
+ ae_sq56s.i aeq0, \ptr, .Lxchal_ofs_+24
+ ae_sq56s.i aeq1, \ptr, .Lxchal_ofs_+32
+ ae_sq56s.i aeq2, \ptr, .Lxchal_ofs_+40
+ ae_sq56s.i aeq3, \ptr, .Lxchal_ofs_+48
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 64
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 56
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 120
+ .endif
+ .endm // xchal_cp1_store
+
+ /*
+ * Macro to load the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain an 8-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_load.
+ */
+#define xchal_cp_AudioEngineLX_load xchal_cp1_load
+ .macro xchal_cp1_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.AE_OVF_SAR \at1 // ureg 240
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wur.AE_BITHEAD \at1 // ureg 241
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wur.AE_SD_NO \at1 // ureg 243
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wur.AE_CBEGIN0 \at1 // ureg 246
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wur.AE_CEND0 \at1 // ureg 247
+ ae_lp24x2.i aep0, \ptr, .Lxchal_ofs_+24
+ ae_lp24x2.i aep1, \ptr, .Lxchal_ofs_+32
+ ae_lp24x2.i aep2, \ptr, .Lxchal_ofs_+40
+ ae_lp24x2.i aep3, \ptr, .Lxchal_ofs_+48
+ ae_lp24x2.i aep4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ ae_lp24x2.i aep5, \ptr, .Lxchal_ofs_+0
+ ae_lp24x2.i aep6, \ptr, .Lxchal_ofs_+8
+ ae_lp24x2.i aep7, \ptr, .Lxchal_ofs_+16
+ addi \ptr, \ptr, 24
+ ae_lq56.i aeq0, \ptr, .Lxchal_ofs_+0
+ ae_lq56.i aeq1, \ptr, .Lxchal_ofs_+8
+ ae_lq56.i aeq2, \ptr, .Lxchal_ofs_+16
+ ae_lq56.i aeq3, \ptr, .Lxchal_ofs_+24
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 88
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 32
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 120
+ .endif
+ .endm // xchal_cp1_load
+
+#define XCHAL_CP1_NUM_ATMPS 1
+#define XCHAL_SA_NUM_ATMPS 1
+
+ /* Empty macros for unconfigured coprocessors: */
+ .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/test_kc705_be/include/variant/tie.h b/arch/xtensa/variants/test_kc705_be/include/variant/tie.h
new file mode 100644
index 000000000..4e1b4baac
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_be/include/variant/tie.h
@@ -0,0 +1,182 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 2 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x82 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP1_NAME "AudioEngineLX"
+#define XCHAL_CP1_IDENT AudioEngineLX
+#define XCHAL_CP1_SA_SIZE 120 /* size of state save area */
+#define XCHAL_CP1_SA_ALIGN 8 /* min alignment of save area */
+#define XCHAL_CP_ID_AUDIOENGINELX 1 /* coprocessor ID (0..7) */
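+/* Cross-check (see variant/tie-asm.h): the 120-byte CP1 save area holds
+   6 x 32-bit user registers (24 bytes), 8 x 48-bit AEP registers stored
+   8 bytes each (64 bytes), and 4 x 56-bit AEQ registers stored 8 bytes
+   each (32 bytes): 24 + 64 + 32 == 120. */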
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 36
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 160 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 8 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
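+
+/*
+ * As a further (hypothetical) illustration, the list macros can be used
+ * to count save-area entries:
+ *
+ *	#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align, \
+ *			asize,dbnum,base,regnum,bitsz,gapsz,reset,x...) +1
+ *	int ncp_regs = 0 XCHAL_NCP_SA_LIST(0);	// 0 +1 +1 ... == 9
+ *	#undef XCHAL_SA_REG
+ */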
+
+#define XCHAL_NCP_SA_NUM 9
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
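+/* Cross-check: 9 entries x 4 bytes each == XCHAL_NCP_SA_SIZE (36). */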
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 18
+#define XCHAL_CP1_SA_LIST(s) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_ovf_sar, 8, 4, 4,0x03F0, ur,240, 7,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_bithead, 4, 4, 4,0x03F1, ur,241, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0,ae_ts_fts_bu_bp, 4, 4, 4,0x03F2, ur,242, 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_sd_no, 4, 4, 4,0x03F3, ur,243, 28,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cbegin0, 4, 4, 4,0x03F6, ur,246, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cend0, 4, 4, 4,0x03F7, ur,247, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep0, 8, 8, 8,0x0060, aep,0 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep1, 8, 8, 8,0x0061, aep,1 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep2, 8, 8, 8,0x0062, aep,2 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep3, 8, 8, 8,0x0063, aep,3 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep4, 8, 8, 8,0x0064, aep,4 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep5, 8, 8, 8,0x0065, aep,5 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep6, 8, 8, 8,0x0066, aep,6 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep7, 8, 8, 8,0x0067, aep,7 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq0, 8, 8, 8,0x0068, aeq,0 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq1, 8, 8, 8,0x0069, aeq,1 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq2, 8, 8, 8,0x006A, aeq,2 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq3, 8, 8, 8,0x006B, aeq,3 , 56,0,0,0)
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,8
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
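+
+/*
+ * A minimal sketch of how these tables might be consumed (the array and
+ * function names are illustrative; byte0 is the first instruction byte as
+ * fetched for this core's endianness):
+ *
+ *	static const unsigned char byte0_len[256] =
+ *		{ XCHAL_BYTE0_FORMAT_LENGTHS };
+ *	unsigned insn_length(unsigned char byte0)
+ *	{
+ *		return byte0_len[byte0];	// 2, 3 or 8 bytes on this core
+ *	}
+ */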
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/test_kc705_hifi/include/variant/core.h b/arch/xtensa/variants/test_kc705_hifi/include/variant/core.h
new file mode 100644
index 000000000..4a2222979
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_hifi/include/variant/core.h
@@ -0,0 +1,531 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2014 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3 1 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1000004 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_kc705_hifi" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x0004983D /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1904983D /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX5.0.4" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2500 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 4 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 250004 /* major*100+minor */
+#define XCHAL_HW_REL_LX5 1
+#define XCHAL_HW_REL_LX5_0 1
+#define XCHAL_HW_REL_LX5_0_4 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2500 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 4 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 250004 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2500 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 4 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 250004 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 1 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 1 /* dcache pref. castout bufsz */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
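+
+/* Worked example for this core: ways * 2^(linewidth + setwidth)
+   == 4 * 2^(5 + 7) == 16384 bytes, matching XCHAL_ICACHE_SIZE and
+   XCHAL_DCACHE_SIZE above. */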
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00000140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00009000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F01FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F0FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
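+
+/* For example, interrupts 6 and 8 are the only level-2 interrupts (see the
+   per-interrupt levels below), so XCHAL_INTLEVEL2_MASK == (1<<6) | (1<<8)
+   == 0x00000140, and XCHAL_INTLEVEL2_ANDBELOW_MASK is the OR of the level-1
+   and level-2 masks: 0x001F00BF | 0x00000140 == 0x001F01FF. */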
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F0000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 2, 3, 4.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
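+
+/* For example, external BInterrupt pin 6 is wired to core interrupt 8
+   (XCHAL_EXTINT6_NUM == 8 below), and the reverse map records the same
+   pairing (XCHAL_INT8_EXTNUM == 6). */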
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the last four options above is set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
diff --git a/arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h b/arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h
new file mode 100644
index 000000000..378163c0a
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h
@@ -0,0 +1,328 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2014 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
+
+
+
+ /*
+ * Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If the macro is invoked as part of a larger store sequence, set
+ * to 1 if this is not the first macro in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.BR \at1 // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_store
+
+ /*
+ * Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If the macro is invoked as part of a larger load sequence, set
+ * to 1 if this is not the first macro in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M3 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.BR \at1 // boolean option
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wsr.SCOMPARE1 \at1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1000, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 24
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+
+
+ /*
+ * Macro to save the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain an 8-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_store.
+ */
+#define xchal_cp_AudioEngineLX_store xchal_cp1_store
+ .macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ rur.AE_OVF_SAR \at1 // ureg 240
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rur.AE_BITHEAD \at1 // ureg 241
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rur.AE_CW_SD_NO \at1 // ureg 243
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rur.AE_CBEGIN0 \at1 // ureg 246
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ rur.AE_CEND0 \at1 // ureg 247
+ s32i \at1, \ptr, .Lxchal_ofs_+20
+ AE_S64.I aed0, \ptr, .Lxchal_ofs_+24
+ AE_S64.I aed1, \ptr, .Lxchal_ofs_+32
+ AE_S64.I aed2, \ptr, .Lxchal_ofs_+40
+ AE_S64.I aed3, \ptr, .Lxchal_ofs_+48
+ AE_S64.I aed4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ AE_S64.I aed5, \ptr, .Lxchal_ofs_+0
+ AE_S64.I aed6, \ptr, .Lxchal_ofs_+8
+ AE_S64.I aed7, \ptr, .Lxchal_ofs_+16
+ AE_S64.I aed8, \ptr, .Lxchal_ofs_+24
+ AE_S64.I aed9, \ptr, .Lxchal_ofs_+32
+ AE_S64.I aed10, \ptr, .Lxchal_ofs_+40
+ AE_S64.I aed11, \ptr, .Lxchal_ofs_+48
+ AE_S64.I aed12, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ AE_S64.I aed13, \ptr, .Lxchal_ofs_+0
+ AE_S64.I aed14, \ptr, .Lxchal_ofs_+8
+ AE_S64.I aed15, \ptr, .Lxchal_ofs_+16
+ AE_SALIGN64.I u0, \ptr, .Lxchal_ofs_+24
+ AE_SALIGN64.I u1, \ptr, .Lxchal_ofs_+32
+ AE_SALIGN64.I u2, \ptr, .Lxchal_ofs_+40
+ AE_SALIGN64.I u3, \ptr, .Lxchal_ofs_+48
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 128
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 56
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 184
+ .endif
+ .endm // xchal_cp1_store
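+
+ // Note: the 184 bytes allocated above break down as 6 x 4-byte user
+ // registers (24) + 16 x 8-byte AED registers (128) + 4 x 8-byte
+ // alignment registers (32): 24 + 128 + 32 == 184.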
+
+ /*
+ * Macro to restore the state of TIE coprocessor AudioEngineLX.
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain an 8-byte-aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters are the same as for xchal_ncp_load.
+ */
+#define xchal_cp_AudioEngineLX_load xchal_cp1_load
+ .macro xchal_cp1_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Custom caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.AE_OVF_SAR \at1 // ureg 240
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wur.AE_BITHEAD \at1 // ureg 241
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wur.AE_TS_FTS_BU_BP \at1 // ureg 242
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wur.AE_CW_SD_NO \at1 // ureg 243
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wur.AE_CBEGIN0 \at1 // ureg 246
+ l32i \at1, \ptr, .Lxchal_ofs_+20
+ wur.AE_CEND0 \at1 // ureg 247
+ AE_L64.I aed0, \ptr, .Lxchal_ofs_+24
+ AE_L64.I aed1, \ptr, .Lxchal_ofs_+32
+ AE_L64.I aed2, \ptr, .Lxchal_ofs_+40
+ AE_L64.I aed3, \ptr, .Lxchal_ofs_+48
+ AE_L64.I aed4, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ AE_L64.I aed5, \ptr, .Lxchal_ofs_+0
+ AE_L64.I aed6, \ptr, .Lxchal_ofs_+8
+ AE_L64.I aed7, \ptr, .Lxchal_ofs_+16
+ AE_L64.I aed8, \ptr, .Lxchal_ofs_+24
+ AE_L64.I aed9, \ptr, .Lxchal_ofs_+32
+ AE_L64.I aed10, \ptr, .Lxchal_ofs_+40
+ AE_L64.I aed11, \ptr, .Lxchal_ofs_+48
+ AE_L64.I aed12, \ptr, .Lxchal_ofs_+56
+ addi \ptr, \ptr, 64
+ AE_L64.I aed13, \ptr, .Lxchal_ofs_+0
+ AE_L64.I aed14, \ptr, .Lxchal_ofs_+8
+ AE_L64.I aed15, \ptr, .Lxchal_ofs_+16
+ AE_LALIGN64.I u0, \ptr, .Lxchal_ofs_+24
+ AE_LALIGN64.I u1, \ptr, .Lxchal_ofs_+32
+ AE_LALIGN64.I u2, \ptr, .Lxchal_ofs_+40
+ AE_LALIGN64.I u3, \ptr, .Lxchal_ofs_+48
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 128
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 56
+ .elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 0, 8, 8
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 184
+ .endif
+ .endm // xchal_cp1_load
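+
+	// Usage sketch (registers and buffer are hypothetical; CP1 must be
+	// enabled in CPENABLE before AudioEngineLX state can be accessed):
+	//
+	//	movi	a2, cp1_save_area	// 8-byte-aligned buffer
+	//	xchal_cp1_store	a2, a3, a4, a5, a6
+	//	...
+	//	xchal_cp1_load	a2, a3, a4, a5, a6	// a2 reloaded first
+	//
+	// Each macro advances \ptr by 128 (two addi's) plus a final offset
+	// of 56, i.e. 184 bytes in all -- exactly XCHAL_CP1_SA_SIZE.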
+
+#define XCHAL_CP1_NUM_ATMPS 1
+#define XCHAL_SA_NUM_ATMPS 1
+
+ /* Empty macros for unconfigured coprocessors: */
+ .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
diff --git a/arch/xtensa/variants/test_kc705_hifi/include/variant/tie.h b/arch/xtensa/variants/test_kc705_hifi/include/variant/tie.h
new file mode 100644
index 000000000..e39fea643
--- /dev/null
+++ b/arch/xtensa/variants/test_kc705_hifi/include/variant/tie.h
@@ -0,0 +1,189 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2014 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 2 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x82 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP1_NAME "AudioEngineLX"
+#define XCHAL_CP1_IDENT AudioEngineLX
+#define XCHAL_CP1_SA_SIZE 184 /* size of state save area */
+#define XCHAL_CP1_SA_ALIGN 8 /* min alignment of save area */
+#define XCHAL_CP_ID_AUDIOENGINELX 1 /* coprocessor ID (0..7) */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
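+
+/* Note: XCHAL_CP_MASK 0x82 is (1 << XCHAL_CP_ID_AUDIOENGINELX) |
+   (1 << XCHAL_CP_ID_XTIOP), and XCHAL_CP_PORT_MASK 0x80 covers only the
+   XTIOP port coprocessor. */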
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 36
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 240 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 8 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
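+
+/* A concrete (illustrative) instance of the same filtering idea: emitting
+ * the debug target numbers of all NCP registers as a C array:
+ *
+ *	#define XCHAL_SA_REG(s,ccu,abi,knd,opt,name,gal,aln,asz,dbnum,p...) dbnum,
+ *	static const unsigned ncp_dbnums[] = { XCHAL_NCP_SA_LIST(0) };
+ *	#undef XCHAL_SA_REG
+ */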
+
+#define XCHAL_NCP_SA_NUM 9
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 26
+#define XCHAL_CP1_SA_LIST(s) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_ovf_sar, 8, 4, 4,0x03F0, ur,240, 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_bithead, 4, 4, 4,0x03F1, ur,241, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0,ae_ts_fts_bu_bp, 4, 4, 4,0x03F2, ur,242, 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cw_sd_no, 4, 4, 4,0x03F3, ur,243, 29,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cbegin0, 4, 4, 4,0x03F6, ur,246, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_cend0, 4, 4, 4,0x03F7, ur,247, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed0, 8, 8, 8,0x1010, aed,0 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed1, 8, 8, 8,0x1011, aed,1 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed2, 8, 8, 8,0x1012, aed,2 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed3, 8, 8, 8,0x1013, aed,3 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed4, 8, 8, 8,0x1014, aed,4 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed5, 8, 8, 8,0x1015, aed,5 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed6, 8, 8, 8,0x1016, aed,6 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed7, 8, 8, 8,0x1017, aed,7 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed8, 8, 8, 8,0x1018, aed,8 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed9, 8, 8, 8,0x1019, aed,9 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed10, 8, 8, 8,0x101A, aed,10 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed11, 8, 8, 8,0x101B, aed,11 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed12, 8, 8, 8,0x101C, aed,12 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed13, 8, 8, 8,0x101D, aed,13 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed14, 8, 8, 8,0x101E, aed,14 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aed15, 8, 8, 8,0x101F, aed,15 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, u0, 8, 8, 8,0x1020, u,0 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, u1, 8, 8, 8,0x1021, u,1 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, u2, 8, 8, 8,0x1022, u,2 , 64,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, u3, 8, 8, 8,0x1023, u,3 , 64,0,0,0)
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,8,8
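+
+/* Illustrative use (not part of this header): the first-byte table can
+ * seed a simple instruction-length decoder:
+ *
+ *	static const unsigned char ilen[256] = { XCHAL_BYTE0_FORMAT_LENGTHS };
+ *	unsigned len = ilen[byte0];	// 2, 3 or 8 bytes on this core
+ */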
+
+#endif /*_XTENSA_CORE_TIE_H*/
diff --git a/arch/xtensa/variants/test_mmuhifi_c3/include/variant/core.h b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/core.h
new file mode 100644
index 000000000..7c5d3a22d
--- /dev/null
+++ b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/core.h
@@ -0,0 +1,383 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of version 2.1 of the GNU
+ * Lesser General Public License as published by the Free Software Foundation.
+ *
+ * Copyright (c) 1999-2009 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 1 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 1 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 2 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 1 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 800000 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_mmuhifi_c3" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "test_mmuhifi_c3"
+#define XCHAL_BUILD_UNIQUE_ID 0x00005A6A /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3CBFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x10405A6A /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX3.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2300 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 230000 /* major*100+minor */
+#define XCHAL_HW_REL_LX3 1
+#define XCHAL_HW_REL_LX3_0 1
+#define XCHAL_HW_REL_LX3_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2300 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 230000 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2300 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 230000 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 1 /* MP coherence feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
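+
+/* Worked check: each cache is ways * 2^(linewidth + setwidth)
+   = 2 * 2^(5 + 8) = 16384 bytes, matching the sizes given above. */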
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 2 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 12 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 4 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 9 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 2 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x00000FFF
+#define XCHAL_INTLEVEL2_MASK 0x00000000
+#define XCHAL_INTLEVEL3_MASK 0x00000000
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x00000FFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 1
+#define XCHAL_INT9_LEVEL 1
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 1
+#define XCHAL_DEBUGLEVEL 2 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFFF000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000080
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000004
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x00000E3B
+#define XCHAL_INTTYPE_MASK_TIMER 0x00000140
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 8 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
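+
+/* Cross-check: the type masks above follow from the per-interrupt types,
+   e.g. XCHAL_INTTYPE_MASK_TIMER 0x00000140 is (1 << 6) | (1 << 8) for
+   CCOMPARE0/CCOMPARE1, and XCHAL_INTTYPE_MASK_SOFTWARE 0x00000080 is
+   bit 7, matching XCHAL_INT7_TYPE. */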
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (More than one interrupt is configured at level 1, so none is listed.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 9 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 10 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 11 /* (intlevel 1) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000280
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL2_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL2_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 0 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 0 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
diff --git a/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h
new file mode 100644
index 000000000..6e9d69caf
--- /dev/null
+++ b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h
@@ -0,0 +1,182 @@
+/*
+ * This header file contains assembly-language definitions (assembly
+ * macros, etc.) for this specific Xtensa processor's TIE extensions
+ * and options. It is customized to this Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of version 2.1 of the GNU
+ * Lesser General Public License as published by the Free Software Foundation.
+ *
+ * Copyright (C) 1999-2009 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
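+
+/* Example: passing select=XTHAL_SAS_OPT|XTHAL_SAS_NOCC|XTHAL_SAS_CALR to
+   xchal_ncp_store below saves only BR and SCOMPARE1; THREADPTR is skipped
+   because its group is compiler-used (XTHAL_SAS_CC) and thread-global
+   (XTHAL_SAS_GLOB). */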
+
+
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (8 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rsr \at1, BR // boolean option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rsr \at1, SCOMPARE1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ rur \at1, THREADPTR // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_ + 0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_store
+
+/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered): ptr (8 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wsr \at1, BR // boolean option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wsr \at1, SCOMPARE1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+ xchal_sa_align \ptr, 0, 1024-4, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_ + 0
+ wur \at1, THREADPTR // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ .endm // xchal_ncp_load
+
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+
+/* Macro to save the state of TIE coprocessor AudioEngineLX.
+ * Save area ptr (clobbered): ptr (8 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP1_NUM_ATMPS needed)
+ */
+#define xchal_cp_AudioEngineLX_store xchal_cp1_store
+/* #define xchal_cp_AudioEngineLX_store_a2 xchal_cp1_store a2 a3 a4 a5 a6 */
+ .macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 0, 1, 8
+ rur240 \at1 // AE_OVF_SAR
+ s32i \at1, \ptr, 0
+ rur241 \at1 // AE_BITHEAD
+ s32i \at1, \ptr, 4
+ rur242 \at1 // AE_TS_FTS_BU_BP
+ s32i \at1, \ptr, 8
+ rur243 \at1 // AE_SD_NO
+ s32i \at1, \ptr, 12
+ AE_SP24X2S.I aep0, \ptr, 16
+ AE_SP24X2S.I aep1, \ptr, 24
+ AE_SP24X2S.I aep2, \ptr, 32
+ AE_SP24X2S.I aep3, \ptr, 40
+ AE_SP24X2S.I aep4, \ptr, 48
+ AE_SP24X2S.I aep5, \ptr, 56
+ addi \ptr, \ptr, 64
+ AE_SP24X2S.I aep6, \ptr, 0
+ AE_SP24X2S.I aep7, \ptr, 8
+ AE_SQ56S.I aeq0, \ptr, 16
+ AE_SQ56S.I aeq1, \ptr, 24
+ AE_SQ56S.I aeq2, \ptr, 32
+ AE_SQ56S.I aeq3, \ptr, 40
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 64
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 112
+ .endif
+ .endm // xchal_cp1_store
+
+/* Macro to restore the state of TIE coprocessor AudioEngineLX.
+ * Save area ptr (clobbered): ptr (8 byte aligned)
+ * Scratch regs (clobbered): at1..at4 (only first XCHAL_CP1_NUM_ATMPS needed)
+ */
+#define xchal_cp_AudioEngineLX_load xchal_cp1_load
+/* #define xchal_cp_AudioEngineLX_load_a2 xchal_cp1_load a2 a3 a4 a5 a6 */
+ .macro xchal_cp1_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
+ xchal_sa_start \continue, \ofs
+ .ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
+ xchal_sa_align \ptr, 0, 0, 1, 8
+ l32i \at1, \ptr, 0
+ wur240 \at1 // AE_OVF_SAR
+ l32i \at1, \ptr, 4
+ wur241 \at1 // AE_BITHEAD
+ l32i \at1, \ptr, 8
+ wur242 \at1 // AE_TS_FTS_BU_BP
+ l32i \at1, \ptr, 12
+ wur243 \at1 // AE_SD_NO
+ addi \ptr, \ptr, 80
+ AE_LQ56.I aeq0, \ptr, 0
+ AE_LQ56.I aeq1, \ptr, 8
+ AE_LQ56.I aeq2, \ptr, 16
+ AE_LQ56.I aeq3, \ptr, 24
+ AE_LP24X2.I aep0, \ptr, -64
+ AE_LP24X2.I aep1, \ptr, -56
+ AE_LP24X2.I aep2, \ptr, -48
+ AE_LP24X2.I aep3, \ptr, -40
+ AE_LP24X2.I aep4, \ptr, -32
+ AE_LP24X2.I aep5, \ptr, -24
+ AE_LP24X2.I aep6, \ptr, -16
+ AE_LP24X2.I aep7, \ptr, -8
+ .set .Lxchal_pofs_, .Lxchal_pofs_ + 80
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 112
+ .endif
+ .endm // xchal_cp1_load
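+
+	// Note the restore order above: \ptr is bumped by 80 first, so the
+	// aeq registers load at small positive offsets and the aep registers
+	// reach back at negative ones; the two .set lines record the pointer
+	// moving 80 bytes while the save-area offset grows by the full
+	// 112-byte XCHAL_CP1_SA_SIZE.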
+
+#define XCHAL_CP1_NUM_ATMPS 1
+#define XCHAL_SA_NUM_ATMPS 1
+
+ /* Empty macros for unconfigured coprocessors: */
+ .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
+ .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
diff --git a/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie.h b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie.h
new file mode 100644
index 000000000..fb1f5f004
--- /dev/null
+++ b/arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie.h
@@ -0,0 +1,140 @@
+/*
+ * This header file describes this specific Xtensa processor's TIE extensions
+ * that extend basic Xtensa core functionality. It is customized to this
+ * Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of version 2.1 of the GNU
+ * Lesser General Public License as published by the Free Software Foundation.
+ *
+ * Copyright (C) 1999-2009 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 2 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x02 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP1_NAME "AudioEngineLX"
+#define XCHAL_CP1_IDENT AudioEngineLX
+#define XCHAL_CP1_SA_SIZE 112 /* size of state save area */
+#define XCHAL_CP1_SA_ALIGN 8 /* min alignment of save area */
+#define XCHAL_CP_ID_AUDIOENGINELX 1 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_SIZE 0
+#define XCHAL_CP7_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 12
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 128 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 8 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
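+
+/* Another illustrative expansion: summing the asize fields recovers
+ * XCHAL_NCP_SA_SIZE (3 registers * 4 bytes = 12):
+ *
+ *	#define XCHAL_SA_REG(s,ccu,abi,knd,opt,name,gal,aln,asz,p...) +(asz)
+ *	enum { ncp_sa_size = 0 XCHAL_NCP_SA_LIST(0) };
+ *	#undef XCHAL_SA_REG
+ */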
+
+#define XCHAL_NCP_SA_NUM 3
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 16
+#define XCHAL_CP1_SA_LIST(s) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_ovf_sar, 8, 4, 4,0x03F0, ur,240, 7,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_bithead, 4, 4, 4,0x03F1, ur,241, 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0,ae_ts_fts_bu_bp, 4, 4, 4,0x03F2, ur,242, 16,0,0,0) \
+ XCHAL_SA_REG(s,0,0,1,0, ae_sd_no, 4, 4, 4,0x03F3, ur,243, 28,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep0, 8, 8, 8,0x0060, aep,0 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep1, 8, 8, 8,0x0061, aep,1 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep2, 8, 8, 8,0x0062, aep,2 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep3, 8, 8, 8,0x0063, aep,3 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep4, 8, 8, 8,0x0064, aep,4 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep5, 8, 8, 8,0x0065, aep,5 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep6, 8, 8, 8,0x0066, aep,6 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aep7, 8, 8, 8,0x0067, aep,7 , 48,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq0, 8, 8, 8,0x0068, aeq,0 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq1, 8, 8, 8,0x0069, aeq,1 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq2, 8, 8, 8,0x006A, aeq,2 , 56,0,0,0) \
+ XCHAL_SA_REG(s,0,0,2,0, aeq3, 8, 8, 8,0x006B, aeq,3 , 56,0,0,0)
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,8
+
+#endif /*_XTENSA_CORE_TIE_H*/