author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
commit	ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree	b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/arc
parent	Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arc')
-rw-r--r--	arch/arc/Kbuild	6
-rw-r--r--	arch/arc/Kconfig	566
-rw-r--r--	arch/arc/Kconfig.debug	10
-rw-r--r--	arch/arc/Makefile	112
-rw-r--r--	arch/arc/boot/.gitignore	2
-rw-r--r--	arch/arc/boot/Makefile	38
-rw-r--r--	arch/arc/boot/dts/Makefile	17
-rw-r--r--	arch/arc/boot/dts/abilis_tb100.dtsi	336
-rw-r--r--	arch/arc/boot/dts/abilis_tb100_dvk.dts	116
-rw-r--r--	arch/arc/boot/dts/abilis_tb101.dtsi	345
-rw-r--r--	arch/arc/boot/dts/abilis_tb101_dvk.dts	116
-rw-r--r--	arch/arc/boot/dts/abilis_tb10x.dtsi	243
-rw-r--r--	arch/arc/boot/dts/axc001.dtsi	126
-rw-r--r--	arch/arc/boot/dts/axc003.dtsi	161
-rw-r--r--	arch/arc/boot/dts/axc003_idu.dtsi	167
-rw-r--r--	arch/arc/boot/dts/axs101.dts	19
-rw-r--r--	arch/arc/boot/dts/axs103.dts	22
-rw-r--r--	arch/arc/boot/dts/axs103_idu.dts	22
-rw-r--r--	arch/arc/boot/dts/axs10x_mb.dtsi	330
-rw-r--r--	arch/arc/boot/dts/haps_hs.dts	99
-rw-r--r--	arch/arc/boot/dts/haps_hs_idu.dts	74
-rw-r--r--	arch/arc/boot/dts/hsdk.dts	351
-rw-r--r--	arch/arc/boot/dts/nsim_700.dts	59
-rw-r--r--	arch/arc/boot/dts/nsimosci.dts	88
-rw-r--r--	arch/arc/boot/dts/nsimosci_hs.dts	90
-rw-r--r--	arch/arc/boot/dts/nsimosci_hs_idu.dts	98
-rw-r--r--	arch/arc/boot/dts/skeleton.dtsi	48
-rw-r--r--	arch/arc/boot/dts/skeleton_hs.dtsi	49
-rw-r--r--	arch/arc/boot/dts/skeleton_hs_idu.dtsi	61
-rw-r--r--	arch/arc/boot/dts/vdk_axc003.dtsi	65
-rw-r--r--	arch/arc/boot/dts/vdk_axc003_idu.dtsi	73
-rw-r--r--	arch/arc/boot/dts/vdk_axs10x_mb.dtsi	126
-rw-r--r--	arch/arc/boot/dts/vdk_hs38.dts	19
-rw-r--r--	arch/arc/boot/dts/vdk_hs38_smp.dts	19
-rw-r--r--	arch/arc/configs/axs101_defconfig	104
-rw-r--r--	arch/arc/configs/axs103_defconfig	102
-rw-r--r--	arch/arc/configs/axs103_smp_defconfig	104
-rw-r--r--	arch/arc/configs/haps_hs_defconfig	62
-rw-r--r--	arch/arc/configs/haps_hs_smp_defconfig	62
-rw-r--r--	arch/arc/configs/hsdk_defconfig	93
-rw-r--r--	arch/arc/configs/nsim_700_defconfig	59
-rw-r--r--	arch/arc/configs/nsimosci_defconfig	67
-rw-r--r--	arch/arc/configs/nsimosci_hs_defconfig	65
-rw-r--r--	arch/arc/configs/nsimosci_hs_smp_defconfig	74
-rw-r--r--	arch/arc/configs/tb10x_defconfig	99
-rw-r--r--	arch/arc/configs/vdk_hs38_defconfig	94
-rw-r--r--	arch/arc/configs/vdk_hs38_smp_defconfig	100
-rw-r--r--	arch/arc/include/asm/Kbuild	6
-rw-r--r--	arch/arc/include/asm/arcregs.h	370
-rw-r--r--	arch/arc/include/asm/asm-offsets.h	6
-rw-r--r--	arch/arc/include/asm/asserts.h	34
-rw-r--r--	arch/arc/include/asm/atomic-llsc.h	97
-rw-r--r--	arch/arc/include/asm/atomic-spinlock.h	111
-rw-r--r--	arch/arc/include/asm/atomic.h	36
-rw-r--r--	arch/arc/include/asm/atomic64-arcv2.h	235
-rw-r--r--	arch/arc/include/asm/barrier.h	44
-rw-r--r--	arch/arc/include/asm/bitops.h	197
-rw-r--r--	arch/arc/include/asm/bug.h	34
-rw-r--r--	arch/arc/include/asm/cache.h	128
-rw-r--r--	arch/arc/include/asm/cacheflush.h	111
-rw-r--r--	arch/arc/include/asm/checksum.h	98
-rw-r--r--	arch/arc/include/asm/cmpxchg.h	143
-rw-r--r--	arch/arc/include/asm/current.h	25
-rw-r--r--	arch/arc/include/asm/delay.h	69
-rw-r--r--	arch/arc/include/asm/disasm.h	113
-rw-r--r--	arch/arc/include/asm/dma.h	11
-rw-r--r--	arch/arc/include/asm/dsp-impl.h	152
-rw-r--r--	arch/arc/include/asm/dsp.h	29
-rw-r--r--	arch/arc/include/asm/dwarf.h	43
-rw-r--r--	arch/arc/include/asm/elf.h	72
-rw-r--r--	arch/arc/include/asm/entry-arcv2.h	294
-rw-r--r--	arch/arc/include/asm/entry-compact.h	302
-rw-r--r--	arch/arc/include/asm/entry.h	270
-rw-r--r--	arch/arc/include/asm/exec.h	12
-rw-r--r--	arch/arc/include/asm/fb.h	8
-rw-r--r--	arch/arc/include/asm/fpu.h	57
-rw-r--r--	arch/arc/include/asm/futex.h	169
-rw-r--r--	arch/arc/include/asm/highmem.h	53
-rw-r--r--	arch/arc/include/asm/hugepage.h	67
-rw-r--r--	arch/arc/include/asm/io.h	234
-rw-r--r--	arch/arc/include/asm/irq.h	30
-rw-r--r--	arch/arc/include/asm/irqflags-arcv2.h	175
-rw-r--r--	arch/arc/include/asm/irqflags-compact.h	201
-rw-r--r--	arch/arc/include/asm/irqflags.h	16
-rw-r--r--	arch/arc/include/asm/jump_label.h	72
-rw-r--r--	arch/arc/include/asm/kdebug.h	16
-rw-r--r--	arch/arc/include/asm/kgdb.h	60
-rw-r--r--	arch/arc/include/asm/kprobes.h	55
-rw-r--r--	arch/arc/include/asm/linkage.h	80
-rw-r--r--	arch/arc/include/asm/mach_desc.h	64
-rw-r--r--	arch/arc/include/asm/mmu-arcv2.h	103
-rw-r--r--	arch/arc/include/asm/mmu.h	23
-rw-r--r--	arch/arc/include/asm/mmu_context.h	174
-rw-r--r--	arch/arc/include/asm/module.h	21
-rw-r--r--	arch/arc/include/asm/page.h	138
-rw-r--r--	arch/arc/include/asm/pci.h	19
-rw-r--r--	arch/arc/include/asm/perf_event.h	70
-rw-r--r--	arch/arc/include/asm/pgalloc.h	97
-rw-r--r--	arch/arc/include/asm/pgtable-bits-arcv2.h	147
-rw-r--r--	arch/arc/include/asm/pgtable-levels.h	188
-rw-r--r--	arch/arc/include/asm/pgtable.h	34
-rw-r--r--	arch/arc/include/asm/processor.h	104
-rw-r--r--	arch/arc/include/asm/ptrace.h	175
-rw-r--r--	arch/arc/include/asm/sections.h	13
-rw-r--r--	arch/arc/include/asm/serial.h	19
-rw-r--r--	arch/arc/include/asm/setup.h	45
-rw-r--r--	arch/arc/include/asm/shmparam.h	15
-rw-r--r--	arch/arc/include/asm/smp.h	130
-rw-r--r--	arch/arc/include/asm/spinlock.h	382
-rw-r--r--	arch/arc/include/asm/spinlock_types.h	34
-rw-r--r--	arch/arc/include/asm/stacktrace.h	34
-rw-r--r--	arch/arc/include/asm/string.h	34
-rw-r--r--	arch/arc/include/asm/switch_to.h	27
-rw-r--r--	arch/arc/include/asm/syscall.h	80
-rw-r--r--	arch/arc/include/asm/syscalls.h	22
-rw-r--r--	arch/arc/include/asm/thread_info.h	106
-rw-r--r--	arch/arc/include/asm/timex.h	15
-rw-r--r--	arch/arc/include/asm/tlb.h	12
-rw-r--r--	arch/arc/include/asm/tlbflush.h	42
-rw-r--r--	arch/arc/include/asm/uaccess.h	638
-rw-r--r--	arch/arc/include/asm/unaligned.h	27
-rw-r--r--	arch/arc/include/asm/unwind.h	156
-rw-r--r--	arch/arc/include/asm/vermagic.h	8
-rw-r--r--	arch/arc/include/asm/vmalloc.h	4
-rw-r--r--	arch/arc/include/uapi/asm/Kbuild	2
-rw-r--r--	arch/arc/include/uapi/asm/bpf_perf_event.h	9
-rw-r--r--	arch/arc/include/uapi/asm/byteorder.h	19
-rw-r--r--	arch/arc/include/uapi/asm/cachectl.h	29
-rw-r--r--	arch/arc/include/uapi/asm/elf.h	35
-rw-r--r--	arch/arc/include/uapi/asm/page.h	36
-rw-r--r--	arch/arc/include/uapi/asm/ptrace.h	58
-rw-r--r--	arch/arc/include/uapi/asm/setup.h	6
-rw-r--r--	arch/arc/include/uapi/asm/sigcontext.h	24
-rw-r--r--	arch/arc/include/uapi/asm/signal.h	28
-rw-r--r--	arch/arc/include/uapi/asm/swab.h	99
-rw-r--r--	arch/arc/include/uapi/asm/unistd.h	52
-rw-r--r--	arch/arc/kernel/.gitignore	2
-rw-r--r--	arch/arc/kernel/Makefile	29
-rw-r--r--	arch/arc/kernel/arc_hostlink.c	55
-rw-r--r--	arch/arc/kernel/arcksyms.c	54
-rw-r--r--	arch/arc/kernel/asm-offsets.c	84
-rw-r--r--	arch/arc/kernel/ctx_sw_asm.S	64
-rw-r--r--	arch/arc/kernel/devtree.c	76
-rw-r--r--	arch/arc/kernel/disasm.c	594
-rw-r--r--	arch/arc/kernel/entry-arcv2.S	249
-rw-r--r--	arch/arc/kernel/entry-compact.S	388
-rw-r--r--	arch/arc/kernel/entry.S	347
-rw-r--r--	arch/arc/kernel/fpu.c	82
-rw-r--r--	arch/arc/kernel/head.S	173
-rw-r--r--	arch/arc/kernel/intc-arcv2.c	191
-rw-r--r--	arch/arc/kernel/intc-compact.c	170
-rw-r--r--	arch/arc/kernel/irq.c	51
-rw-r--r--	arch/arc/kernel/jump_label.c	157
-rw-r--r--	arch/arc/kernel/kgdb.c	206
-rw-r--r--	arch/arc/kernel/kprobes.c	415
-rw-r--r--	arch/arc/kernel/mcip.c	419
-rw-r--r--	arch/arc/kernel/module.c	146
-rw-r--r--	arch/arc/kernel/perf_event.c	850
-rw-r--r--	arch/arc/kernel/process.c	296
-rw-r--r--	arch/arc/kernel/ptrace.c	401
-rw-r--r--	arch/arc/kernel/reset.c	31
-rw-r--r--	arch/arc/kernel/setup.c	654
-rw-r--r--	arch/arc/kernel/signal.c	443
-rw-r--r--	arch/arc/kernel/smp.c	414
-rw-r--r--	arch/arc/kernel/stacktrace.c	275
-rw-r--r--	arch/arc/kernel/sys.c	18
-rw-r--r--	arch/arc/kernel/traps.c	158
-rw-r--r--	arch/arc/kernel/troubleshoot.c	228
-rw-r--r--	arch/arc/kernel/unaligned.c	262
-rw-r--r--	arch/arc/kernel/unwind.c	1315
-rw-r--r--	arch/arc/kernel/vmlinux.lds.S	155
-rw-r--r--	arch/arc/lib/Makefile	15
-rw-r--r--	arch/arc/lib/memcmp.S	149
-rw-r--r--	arch/arc/lib/memcpy-700.S	63
-rw-r--r--	arch/arc/lib/memcpy-archs-unaligned.S	47
-rw-r--r--	arch/arc/lib/memcpy-archs.S	219
-rw-r--r--	arch/arc/lib/memset-archs.S	144
-rw-r--r--	arch/arc/lib/memset.S	56
-rw-r--r--	arch/arc/lib/strchr-700.S	130
-rw-r--r--	arch/arc/lib/strcmp-archs.S	75
-rw-r--r--	arch/arc/lib/strcmp.S	93
-rw-r--r--	arch/arc/lib/strcpy-700.S	67
-rw-r--r--	arch/arc/lib/strlen.S	80
-rw-r--r--	arch/arc/mm/Makefile	8
-rw-r--r--	arch/arc/mm/cache.c	1218
-rw-r--r--	arch/arc/mm/dma.c	106
-rw-r--r--	arch/arc/mm/extable.c	24
-rw-r--r--	arch/arc/mm/fault.c	193
-rw-r--r--	arch/arc/mm/highmem.c	73
-rw-r--r--	arch/arc/mm/init.c	202
-rw-r--r--	arch/arc/mm/ioremap.c	64
-rw-r--r--	arch/arc/mm/mmap.c	96
-rw-r--r--	arch/arc/mm/tlb.c	761
-rw-r--r--	arch/arc/mm/tlbex.S	378
-rw-r--r--	arch/arc/plat-axs10x/Kconfig	46
-rw-r--r--	arch/arc/plat-axs10x/Makefile	6
-rw-r--r--	arch/arc/plat-axs10x/axs10x.c	384
-rw-r--r--	arch/arc/plat-hsdk/Kconfig	14
-rw-r--r--	arch/arc/plat-hsdk/Makefile	6
-rw-r--r--	arch/arc/plat-hsdk/platform.c	326
-rw-r--r--	arch/arc/plat-sim/Makefile	6
-rw-r--r--	arch/arc/plat-sim/platform.c	32
-rw-r--r--	arch/arc/plat-tb10x/Kconfig	20
-rw-r--r--	arch/arc/plat-tb10x/Makefile	10
-rw-r--r--	arch/arc/plat-tb10x/tb10x.c	20
205 files changed, 27673 insertions, 0 deletions
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644
index 0000000000..b94102fff6
--- /dev/null
+++ b/arch/arc/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += kernel/
+obj-y += mm/
+
+# for cleaning
+subdir- += boot
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644
index 0000000000..3162db540e
--- /dev/null
+++ b/arch/arc/Kconfig
@@ -0,0 +1,566 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+config ARC
+ def_bool y
+ select ARC_TIMERS
+ select ARCH_HAS_CACHE_LINE_SIZE
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_DMA_PREP_COHERENT
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SETUP_DMA_OPS
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
+ select ARCH_32BIT_OFF_T
+ select BUILDTIME_TABLE_SORT
+ select CLONE_BACKWARDS
+ select COMMON_CLK
+ select DMA_DIRECT_REMAP
+ select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
+ # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
+ select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_IOREMAP
+ select GENERIC_STRNCPY_FROM_USER if MMU
+ select GENERIC_STRNLEN_USER if MMU
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_IOREMAP_PROT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_PERF_EVENTS
+ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
+ select LOCK_MM_AND_FIND_VMA
+ select MODULES_USE_ELF_RELA
+ select OF
+ select OF_EARLY_FLATTREE
+ select PCI_SYSCALL if PCI
+ select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
+ select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
+ select TRACE_IRQFLAGS_SUPPORT
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config SCHED_OMIT_FRAME_POINTER
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config MMU
+ def_bool y
+
+config NO_IOPORT_MAP
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+ select STACKTRACE
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-tb10x/Kconfig"
+source "arch/arc/plat-axs10x/Kconfig"
+source "arch/arc/plat-hsdk/Kconfig"
+
+endmenu
+
+choice
+ prompt "ARC Instruction Set"
+ default ISA_ARCV2
+
+config ISA_ARCOMPACT
+ bool "ARCompact ISA"
+ select CPU_NO_EFFICIENT_FFS
+ help
+ The original ARC ISA of ARC600/700 cores
+
+config ISA_ARCV2
+ bool "ARC ISA v2"
+ select ARC_TIMERS_64BIT
+ help
+ ISA for the Next Generation ARC-HS cores
+
+endchoice
+
+menu "ARC CPU Configuration"
+
+choice
+ prompt "ARC Core"
+ default ARC_CPU_770 if ISA_ARCOMPACT
+ default ARC_CPU_HS if ISA_ARCV2
+
+config ARC_CPU_770
+ bool "ARC770"
+ depends on ISA_ARCOMPACT
+ select ARC_HAS_SWAPE
+ help
+ Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+ This core has a bunch of cool new features:
+ -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+ Shared Address Spaces (for sharing TLB entries in MMU)
+ -Caches: New Prog Model, Region Flush
+ -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+config ARC_CPU_HS
+ bool "ARC-HS"
+ depends on ISA_ARCV2
+ help
+ Support for ARC HS38x Cores based on ARCv2 ISA
+ The notable features are:
+ - SMP configurations of up to 4 cores with coherency
+ - Optional L2 Cache and IO-Coherency
+	  - Revised Interrupt Architecture (multiple priorities, reg banks,
+ auto stack switch, auto regfile save/restore)
+ - MMUv4 (PIPT dcache, Huge Pages)
+ - Instructions for
+ * 64bit load/store: LDD, STD
+ * Hardware assisted divide/remainder: DIV, REM
+ * Function prologue/epilogue: ENTER_S, LEAVE_S
+ * IRQ enable/disable: CLRI, SETI
+ * pop count: FFS, FLS
+ * SETcc, BMSKN, XBFU...
+
+endchoice
+
+config ARC_TUNE_MCPU
+ string "Override default -mcpu compiler flag"
+ default ""
+ help
+ Override default -mcpu=xxx compiler flag (which is set depending on
+ the ISA version) with the specified value.
+	  NOTE: If the specified flag isn't supported by the current compiler, the
+ ISA default value will be used as a fallback.
+
+config CPU_BIG_ENDIAN
+ bool "Enable Big Endian Mode"
+ help
+ Build kernel for Big Endian Mode of ARC CPU
+
+config SMP
+ bool "Symmetric Multi-Processing"
+ select ARC_MCIP if ISA_ARCV2
+ help
+ This enables support for systems with more than one CPU.
+
+if SMP
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-4096)"
+ range 2 4096
+ default "4"
+
+config ARC_SMP_HALT_ON_RESET
+ bool "Enable Halt-on-reset boot mode"
+ help
+	  In an SMP configuration, cores can be configured as Halt-on-reset
+	  or they can all start at the same time. For Halt-on-reset, the
+	  non-master cores are parked until the Master kicks them, so they
+	  start off at the designated entry point. Otherwise, all cores jump
+	  to a common entry point and spin-wait for the Master's signal.
+
+endif #SMP
+
+config ARC_MCIP
+	bool "ARConnect Multicore IP (MCIP) Support"
+ depends on ISA_ARCV2
+ default y if SMP
+ help
+ This IP block enables SMP in ARC-HS38 cores.
+	  It provides cross-core interrupts, multi-core debug,
+	  hardware semaphores, shared memory, etc.
+
+menuconfig ARC_CACHE
+ bool "Enable Cache Support"
+ default y
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+ int "Cache Line Length (as power of 2)"
+ range 5 7
+ default "6"
+ help
+	  Starting with ARC700 4.9, the cache line length is configurable.
+	  This option specifies "N", with line length = 2^N, so line lengths
+	  of 32, 64 and 128 are specified by 5, 6 and 7, respectively.
+	  Linux only supports the same line length for I and D caches.
+
+config ARC_HAS_ICACHE
+ bool "Use Instruction Cache"
+ default y
+
+config ARC_HAS_DCACHE
+ bool "Use Data Cache"
+ default y
+
+config ARC_CACHE_PAGES
+ bool "Per Page Cache Control"
+ default y
+ depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+ help
+	  This can be used to override the global I/D Cache Enable on a
+	  per-page basis (but only for pages accessed via the MMU, such as
+	  kernel virtual or user virtual addresses); TLB entries have a
+	  per-page Cache Enable bit.
+	  Note that Global I/D ENABLE + Per-Page DISABLE works, but the
+	  corollary Global DISABLE + Per-Page ENABLE won't.
+
+config ARC_CACHE_VIPT_ALIASING
+ bool "Support VIPT Aliasing D$"
+ depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
+
+endif #ARC_CACHE
+
+config ARC_HAS_ICCM
+ bool "Use ICCM"
+ help
+	  Single Cycle RAMs to store Fast Path Code
+
+config ARC_ICCM_SZ
+ int "ICCM Size in KB"
+ default "64"
+ depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+ bool "Use DCCM"
+ help
+	  Single Cycle RAMs to store Fast Path Data
+
+config ARC_DCCM_SZ
+ int "DCCM Size in KB"
+ default "64"
+ depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+ hex "DCCM map address"
+ default "0xA0000000"
+ depends on ARC_HAS_DCCM
+
+choice
+ prompt "MMU Version"
+ default ARC_MMU_V3 if ISA_ARCOMPACT
+ default ARC_MMU_V4 if ISA_ARCV2
+
+config ARC_MMU_V3
+ bool "MMU v3"
+ depends on ISA_ARCOMPACT
+ help
+	  Introduced with ARC700 4.10: new features include
+	  variable page size (1k-16k), variable JTLB size 128 x (2 or 4),
+	  and Shared Address Spaces (SASID).
+
+config ARC_MMU_V4
+ bool "MMU v4"
+ depends on ISA_ARCV2
+
+endchoice
+
+
+choice
+ prompt "MMU Page Size"
+ default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+ bool "8KB"
+ help
+	  Choose between 8k and 16k
+
+config ARC_PAGE_SIZE_16K
+ bool "16KB"
+
+config ARC_PAGE_SIZE_4K
+ bool "4KB"
+ depends on ARC_MMU_V3 || ARC_MMU_V4
+
+endchoice
+
+choice
+ prompt "MMU Super Page Size"
+ depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
+ default ARC_HUGEPAGE_2M
+
+config ARC_HUGEPAGE_2M
+ bool "2MB"
+
+config ARC_HUGEPAGE_16M
+ bool "16MB"
+
+endchoice
+
+config PGTABLE_LEVELS
+ int "Number of Page table levels"
+ default 2
+
+config ARC_COMPACT_IRQ_LEVELS
+ depends on ISA_ARCOMPACT
+ bool "Setup Timer IRQ as high Priority"
+ # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+ depends on !SMP
+
+config ARC_FPU_SAVE_RESTORE
+ bool "Enable FPU state persistence across context switch"
+ help
+ ARCompact FPU has internal registers to assist with Double precision
+	  Floating Point operations. There are control and status registers
+ for floating point exceptions and rounding modes. These are
+ preserved across task context switch when enabled.
+
+config ARC_CANT_LLSC
+ def_bool n
+
+config ARC_HAS_LLSC
+ bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+ default y
+ depends on !ARC_CANT_LLSC
+
+config ARC_HAS_SWAPE
+ bool "Insn: SWAPE (endian-swap)"
+ default y
+
+if ISA_ARCV2
+
+config ARC_USE_UNALIGNED_MEM_ACCESS
+ bool "Enable unaligned access in HW"
+ default y
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ The ARC HS architecture supports unaligned memory access
+ which is disabled by default. Enable unaligned access in
+	  hardware and let software make use of it.
+
+config ARC_HAS_LL64
+ bool "Insn: 64bit LDD/STD"
+ help
+	  Enable gcc to generate 64-bit load/store instructions.
+	  The ISA mandates even/odd registers to allow encoding of two
+ dest operands with 2 possible source operands.
+ default y
+
+config ARC_HAS_DIV_REM
+ bool "Insn: div, divu, rem, remu"
+ default y
+
+config ARC_HAS_ACCL_REGS
+ bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6 and/or DSP)"
+ default y
+ help
+	  Depending on the configuration, the CPU can contain an accumulator
+	  reg-pair (also referred to as r58:r59). These can also be used by gcc
+	  as GPRs, so the kernel needs to save/restore them per process.
+
+config ARC_DSP_HANDLED
+ def_bool n
+
+config ARC_DSP_SAVE_RESTORE_REGS
+ def_bool n
+
+choice
+ prompt "DSP support"
+ default ARC_DSP_NONE
+ help
+	  Depending on the configuration, the CPU can contain DSP registers
+ (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
+ Below are options describing how to handle these registers in
+ interrupt entry / exit and in context switch.
+
+config ARC_DSP_NONE
+ bool "No DSP extension presence in HW"
+ help
+ No DSP extension presence in HW
+
+config ARC_DSP_KERNEL
+ bool "DSP extension in HW, no support for userspace"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ help
+ DSP extension presence in HW, no support for DSP-enabled userspace
+ applications. We don't save / restore DSP registers and only do
+	  some minimal preparations so userspace won't be able to break the kernel.
+
+config ARC_DSP_USERSPACE
+ bool "Support DSP for userspace apps"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ select ARC_DSP_SAVE_RESTORE_REGS
+ help
+	  DSP extension presence in HW; the kernel saves / restores DSP
+	  registers to run DSP-enabled userspace applications.
+
+config ARC_DSP_AGU_USERSPACE
+ bool "Support DSP with AGU for userspace apps"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ select ARC_DSP_SAVE_RESTORE_REGS
+ help
+	  DSP and AGU extensions presence in HW; the kernel saves / restores
+	  DSP and AGU registers to run DSP-enabled userspace applications.
+endchoice
+
+config ARC_IRQ_NO_AUTOSAVE
+ bool "Disable hardware autosave regfile on interrupts"
+ default n
+ help
+	  On HS cores, a taken interrupt auto-saves the regfile on the stack.
+	  This is programmable and can be optionally disabled, in which case
+	  the software INTERRUPT_PROLOGUE/EPILOGUE do the needed work.
+
+config ARC_LPB_DISABLE
+ bool "Disable loop buffer (LPB)"
+ help
+	  On HS cores, the loop buffer (LPB) is programmable at runtime and
+	  can be optionally disabled.
+
+endif # ISA_ARCV2
+
+endmenu # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+ hex "Kernel link address"
+ default "0x80000000"
+ help
+	  ARC700 divides the 32-bit physical address space into two equal halves
+	  -Lower 2G (0 - 0x7FFF_FFFF) is user virtual, translated by MMU
+	  -Upper 2G (0x8000_0000 onwards) is untranslated, for the kernel
+	  Typically the Linux kernel is linked at the start of the untranslated
+	  region, hence the default value of 0x8000_0000.
+ However some customers have peripherals mapped at this addr, so
+ Linux needs to be scooted a bit.
+ If you don't know what the above means, leave this setting alone.
+	  This needs to match the memory start address specified in the Device Tree.
+
+config LINUX_RAM_BASE
+ hex "RAM base address"
+ default LINUX_LINK_BASE
+ help
+	  By default Linux is linked at the base of RAM. However in some
+	  special cases (such as HSDK), Linux can't be linked at the start
+	  of DDR, hence this option.
+
+config HIGHMEM
+ bool "High Memory Support"
+ select HAVE_ARCH_PFN_VALID
+ select KMAP_LOCAL
+ help
+	  With the ARC 2G:2G address split, only the upper 2G is directly
+	  addressable by the kernel. Enable this to potentially allow access
+	  to the rest of the 2G, and to PAE in future.
+
+config ARC_HAS_PAE40
+ bool "Support for the 40-bit Physical Address Extension"
+ depends on ISA_ARCV2
+ select HIGHMEM
+ select PHYS_ADDR_T_64BIT
+ help
+ Enable access to physical memory beyond 4G, only supported on
+ ARC cores with 40 bit Physical Addressing support
+
+config ARC_KVADDR_SIZE
+ int "Kernel Virtual Address Space size (MB)"
+ range 0 512
+ default "256"
+ help
+ The kernel address space is carved out of 256MB of translated address
+	  space for catering to vmalloc, modules, pkmap and fixmap. This,
+	  however, may not suffice the vmalloc requirements of a 4K-CPU EZChip
+	  system, so allow this to be stretched to 512 MB (by extending into
+	  the reserved kernel-user gutter).
+
+config ARC_CURR_IN_REG
+ bool "cache current task pointer in gp"
+ default y
+ help
+	  This reserves the gp register to point to the Current Task in
+	  kernel mode, eliding a memory access on each reference.
+
+
+config ARC_EMUL_UNALIGNED
+ bool "Emulate unaligned memory access (userspace only)"
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ depends on ISA_ARCOMPACT
+ help
+ This enables misaligned 16 & 32 bit memory access from user space.
+	  Use ONLY if absolutely necessary, as it will be very slow and can
+	  also hide potential bugs in code.
+
+config HZ
+ int "Timer Frequency"
+ default 100
+
+config ARC_METAWARE_HLINK
+ bool "Support for Metaware debugger assisted Host access"
+ help
+	  This option allows Linux userland apps to directly access the
+	  host file system (open/creat/read/write etc.) with help from the
+	  Metaware Debugger. This can come in handy for Linux-host communication
+ when there is no real usable peripheral such as EMAC.
+
+menuconfig ARC_DBG
+ bool "ARC debugging"
+ default y
+
+if ARC_DBG
+
+config ARC_DW2_UNWIND
+ bool "Enable DWARF specific kernel stack unwind"
+ default y
+ select KALLSYMS
+ help
+ Compiles the kernel with DWARF unwind information and can be used
+ to get stack backtraces.
+
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information.
+
+config ARC_DBG_JUMP_LABEL
+ bool "Paranoid checks in Static Keys (jump labels) code"
+ depends on JUMP_LABEL
+ default y if STATIC_KEYS_SELFTEST
+ help
+ Enable paranoid checks and self-test of both ARC-specific and generic
+ part of static keys (jump labels) related code.
+endif
+
+config ARC_BUILTIN_DTB_NAME
+ string "Built in DTB"
+ help
+	  Set the name of the DTB to embed in the vmlinux binary.
+	  Leaving it blank selects the default "nsim_700" dtb (see
+	  arch/arc/boot/dts/Makefile).
+
+endmenu # "ARC Architecture Configuration"
+
+config ARCH_FORCE_MAX_ORDER
+ int "Maximum zone order"
+ default "11" if ARC_HUGEPAGE_16M
+ default "10"
+
+source "kernel/power/Kconfig"
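The symbols above are consumed by the defconfigs under arch/arc/configs/ listed in the diffstat; they surface there as plain CONFIG_* assignments. A purely illustrative sketch — values assumed, not part of this commit — of an SMP ARCv2/HS38 selection:

# Illustrative defconfig fragment (assumed values, not from this commit)
CONFIG_ISA_ARCV2=y
CONFIG_ARC_CPU_HS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_ARC_PAGE_SIZE_8K=y
CONFIG_ARC_HAS_LLSC=y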
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644
index 0000000000..45add86dec
--- /dev/null
+++ b/arch/arc/Kconfig.debug
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config 16KSTACKS
+ bool "Use 16Kb for kernel stacks instead of 8Kb"
+ help
+	  If you say Y here, the kernel will use a 16KB stack size for the
+	  kernel stack attached to each process/thread. The default is 8KB.
+	  This increases the resident kernel footprint, will allow fewer
+	  threads to run on the system, and also increases the pressure
+	  on the VM subsystem for higher-order allocations.
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644
index 0000000000..2390dd042e
--- /dev/null
+++ b/arch/arc/Makefile
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+KBUILD_DEFCONFIG := haps_hs_smp_defconfig
+
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
+endif
+
+cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT) := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2) := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),)
+cflags-y += $(tune-mcpu-def-y)
+else
+tune-mcpu := $(CONFIG_ARC_TUNE_MCPU)
+ifneq ($(call cc-option,$(tune-mcpu)),)
+cflags-y += $(tune-mcpu)
+else
+# The flag provided by the 'CONFIG_ARC_TUNE_MCPU' option isn't known to this
+# compiler (probably the compiler is too old). Use the ISA default mcpu flag
+# instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y += $(tune-mcpu-def-y)
+endif
+endif
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file.
+# We had a customer-reported bug where some code built into the kernel was NOT
+# using any kernel headers, and so was missing the global register.
+# This can't be done unconditionally because of recursive include issues
+# due to <linux/thread_info.h>.
+LINUXINCLUDE += -include $(srctree)/arch/arc/include/asm/current.h
+cflags-y += -ffixed-gp
+endif
+
+cflags-y += -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+
+ifdef CONFIG_ISA_ARCV2
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y += -munaligned-access
+else
+cflags-y += -mno-unaligned-access
+endif
+
+ifndef CONFIG_ARC_HAS_LL64
+cflags-y += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y += -mno-div-rem
+endif
+
+endif
+
+cfi := $(call as-instr,.cfi_startproc\n.cfi_endproc,-DARC_DW2_UNWIND_AS_CFI)
+cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
+
+# Small data is the default for the elf32 toolchain. If not usable, disable it.
+# This also allows repurposing GP as a scratch reg for the gcc reg allocator.
+disable_small_data := y
+cflags-$(disable_small_data) += -mno-sdata
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into the built-in kernel
+KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
+
+# Finally dump everything into the kernel build system
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(KBUILD_CFLAGS)
+KBUILD_LDFLAGS += $(ldflags-y)
+
+# Without this the dtb won't be embedded into the kernel binary
+core-y += arch/arc/boot/dts/
+
+core-y += arch/arc/plat-sim/
+core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
+core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
+core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/
+
+libs-y += arch/arc/lib/ $(LIBGCC)
+
+boot := arch/arc/boot
+
+boot_targets := uImage.bin uImage.gz uImage.lzma
+
+PHONY += $(boot_targets)
+$(boot_targets): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+uimage-default-y := uImage.bin
+uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz
+uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma
+
+PHONY += uImage
+uImage: $(uimage-default-y)
+ @ln -sf $< $(boot)/uImage
+ @$(kecho) ' Image $(boot)/uImage is ready'
+
+CLEAN_FILES += $(boot)/uImage
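Note how the tune-mcpu block above probes CONFIG_ARC_TUNE_MCPU with cc-option and falls back to the ISA default (-mcpu=arc700 or -mcpu=hs38) if the compiler rejects the flag. A hypothetical override — the value here is an assumption, not from this commit — would be set in the .config as:

# Illustrative: request a specific core tuning; the Makefile logic above
# falls back to the ISA default if the compiler doesn't know this flag.
CONFIG_ARC_TUNE_MCPU="-mcpu=hs38_linux"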
diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore
new file mode 100644
index 0000000000..675db14940
--- /dev/null
+++ b/arch/arc/boot/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+uImage
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644
index 0000000000..5648748c28
--- /dev/null
+++ b/arch/arc/boot/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# The uImage build relies on mkimage being available on your host for the ARC
+# target. You will need to build U-Boot for ARC, rename mkimage to
+# arc-elf32-mkimage, and make sure it's reachable from your PATH.
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$($(READELF) -h vmlinux | \
+ grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
+
+targets += vmlinux.bin
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,uimage,none)
+
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage,gzip)
+
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,uimage,lzma)
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644
index 0000000000..4237aa5de3
--- /dev/null
+++ b/arch/arc/boot/dts/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+# Built-in dtb
+builtindtb-y := nsim_700
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),)
+ builtindtb-y := $(CONFIG_ARC_BUILTIN_DTB_NAME)
+endif
+
+obj-y += $(builtindtb-y).dtb.o
+dtb-y := $(builtindtb-y).dtb
+
+# for CONFIG_OF_ALL_DTBS test
+dtstree := $(srctree)/$(src)
+dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
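The builtindtb-y logic above defaults to nsim_700 and is overridden by CONFIG_ARC_BUILTIN_DTB_NAME. As an illustrative fragment — not part of this commit — embedding the HSDK board dtb from this series instead:

# Illustrative: embed hsdk.dtb instead of the default nsim_700.dtb
CONFIG_ARC_BUILTIN_DTB_NAME="hsdk"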
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
new file mode 100644
index 0000000000..41026a3bfa
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB100 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+
+/ {
+ soc100 {
+ bus-frequency = <166666666>;
+
+ pll0: oscillator {
+ clock-frequency = <1000000000>;
+ };
+ cpu_clk: clkdiv_cpu {
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ ahb_clk: clkdiv_ahb {
+ clock-mult = <1>;
+ clock-div = <6>;
+ };
+
+ iomux: iomux@ff10601c {
+ /* Port 1 */
+ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
+ abilis,function = "mis0";
+ };
+ pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
+ abilis,function = "mis1";
+ };
+ pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
+ abilis,function = "gpioa";
+ };
+ pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
+ abilis,function = "mip1";
+ };
+ /* Port 2 */
+ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
+ abilis,function = "mis2";
+ };
+ pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
+ abilis,function = "mis3";
+ };
+ pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
+ abilis,function = "gpioc";
+ };
+ pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
+ abilis,function = "mip3";
+ };
+ /* Port 3 */
+ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
+ abilis,function = "mis4";
+ };
+ pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
+ abilis,function = "mis5";
+ };
+ pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
+ abilis,function = "gpioe";
+ };
+ pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
+ abilis,function = "mip5";
+ };
+ /* Port 4 */
+ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
+ abilis,function = "mis6";
+ };
+ pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
+ abilis,function = "mis7";
+ };
+ pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
+ abilis,function = "gpiog";
+ };
+ pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
+ abilis,function = "mip7";
+ };
+ /* Port 5 */
+ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
+ abilis,function = "gpioj";
+ };
+ pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
+ abilis,function = "gpiok";
+ };
+ pctl_ciplus: pctl-ciplus { /* CI+ interface */
+ abilis,function = "ciplus";
+ };
+ pctl_mcard: pctl-mcard { /* M-Card interface */
+ abilis,function = "mcard";
+ };
+ /* Port 6 */
+ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
+ abilis,function = "mop";
+ };
+ pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+ abilis,function = "mos0";
+ };
+ pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+ abilis,function = "mos1";
+ };
+ pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+ abilis,function = "mos2";
+ };
+ pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+ abilis,function = "mos3";
+ };
+ /* Port 7 */
+ pctl_uart0: pctl-uart0 { /* UART 0 */
+ abilis,function = "uart0";
+ };
+ pctl_uart1: pctl-uart1 { /* UART 1 */
+ abilis,function = "uart1";
+ };
+ pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
+ abilis,function = "gpiol";
+ };
+ pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
+ abilis,function = "gpiom";
+ };
+ /* Port 8 */
+ pctl_spi3: pctl-spi3 {
+ abilis,function = "spi3";
+ };
+ /* Port 9 */
+ pctl_spi1: pctl-spi1 {
+ abilis,function = "spi1";
+ };
+ pctl_gpio_n: pctl-gpio-n {
+ abilis,function = "gpion";
+ };
+ /* Unmuxed GPIOs */
+ pctl_gpio_b: pctl-gpio-b {
+ abilis,function = "gpiob";
+ };
+ pctl_gpio_d: pctl-gpio-d {
+ abilis,function = "gpiod";
+ };
+ pctl_gpio_f: pctl-gpio-f {
+ abilis,function = "gpiof";
+ };
+ pctl_gpio_h: pctl-gpio-h {
+ abilis,function = "gpioh";
+ };
+ pctl_gpio_i: pctl-gpio-i {
+ abilis,function = "gpioi";
+ };
+ };
+
+ gpioa: gpio@ff140000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff140000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioa";
+ };
+ gpiob: gpio@ff141000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff141000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiob";
+ };
+ gpioc: gpio@ff142000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff142000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioc";
+ };
+ gpiod: gpio@ff143000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff143000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiod";
+ };
+ gpioe: gpio@ff144000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff144000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioe";
+ };
+ gpiof: gpio@ff145000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff145000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiof";
+ };
+ gpiog: gpio@ff146000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff146000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiog";
+ };
+ gpioh: gpio@ff147000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff147000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioh";
+ };
+ gpioi: gpio@ff148000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff148000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <12>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioi";
+ };
+ gpioj: gpio@ff149000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff149000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <32>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioj";
+ };
+ gpiok: gpio@ff14a000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14a000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <22>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiok";
+ };
+ gpiol: gpio@ff14b000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14b000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiol";
+ };
+ gpiom: gpio@ff14c000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14c000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiom";
+ };
+ gpion: gpio@ff14d000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14d000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <5>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpion";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
new file mode 100644
index 0000000000..6d346de5e3
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB100 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb100.dtsi"
+
+/ {
+ model = "abilis,tb100";
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+ };
+
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x08000000>; /* 128M */
+ };
+
+ soc100 {
+ uart@ff100000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pctl_uart0>;
+ };
+ ethernet@fe100000 {
+ phy-mode = "rgmii";
+ };
+
+ i2c0: i2c@ff120000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c1: i2c@ff121000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c2: i2c@ff122000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c3: i2c@ff123000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c4: i2c@ff124000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ power {
+ label = "Power";
+ gpios = <&gpioi 0 0>;
+ linux,default-trigger = "default-on";
+ };
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&gpioi 1 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ led2 {
+ label = "LED2";
+ gpios = <&gpioi 2 0>;
+ default-state = "off";
+ };
+ led3 {
+ label = "LED3";
+ gpios = <&gpioi 3 0>;
+ default-state = "off";
+ };
+ led4 {
+ label = "LED4";
+ gpios = <&gpioi 4 0>;
+ default-state = "off";
+ };
+ led5 {
+ label = "LED5";
+ gpios = <&gpioi 5 0>;
+ default-state = "off";
+ };
+ led6 {
+ label = "LED6";
+ gpios = <&gpioi 6 0>;
+ default-state = "off";
+ };
+ led7 {
+ label = "LED7";
+ gpios = <&gpioi 7 0>;
+ default-state = "off";
+ };
+ led8 {
+ label = "LED8";
+ gpios = <&gpioi 8 0>;
+ default-state = "off";
+ };
+ led9 {
+ label = "LED9";
+ gpios = <&gpioi 9 0>;
+ default-state = "off";
+ };
+ led10 {
+ label = "LED10";
+ gpios = <&gpioi 10 0>;
+ default-state = "off";
+ };
+ led11 {
+ label = "LED11";
+ gpios = <&gpioi 11 0>;
+ default-state = "off";
+ };
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
new file mode 100644
index 0000000000..041ab1ba02
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB101 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+
+/ {
+ soc100 {
+ bus-frequency = <166666666>;
+
+ pll0: oscillator {
+ clock-frequency = <1000000000>;
+ };
+ cpu_clk: clkdiv_cpu {
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ ahb_clk: clkdiv_ahb {
+ clock-mult = <1>;
+ clock-div = <6>;
+ };
+
+ iomux: iomux@ff10601c {
+ /* Port 1 */
+ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
+ abilis,function = "mis0";
+ };
+ pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
+ abilis,function = "mis1";
+ };
+ pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
+ abilis,function = "gpioa";
+ };
+ pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
+ abilis,function = "mip1";
+ };
+ /* Port 2 */
+ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
+ abilis,function = "mis2";
+ };
+ pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
+ abilis,function = "mis3";
+ };
+ pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
+ abilis,function = "gpioc";
+ };
+ pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
+ abilis,function = "mip3";
+ };
+ /* Port 3 */
+ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
+ abilis,function = "mis4";
+ };
+ pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
+ abilis,function = "mis5";
+ };
+ pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
+ abilis,function = "gpioe";
+ };
+ pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
+ abilis,function = "mip5";
+ };
+ /* Port 4 */
+ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
+ abilis,function = "mis6";
+ };
+ pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
+ abilis,function = "mis7";
+ };
+ pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
+ abilis,function = "gpiog";
+ };
+ pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
+ abilis,function = "mip7";
+ };
+ /* Port 5 */
+ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
+ abilis,function = "gpioj";
+ };
+ pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
+ abilis,function = "gpiok";
+ };
+ pctl_ciplus: pctl-ciplus { /* CI+ interface */
+ abilis,function = "ciplus";
+ };
+ pctl_mcard: pctl-mcard { /* M-Card interface */
+ abilis,function = "mcard";
+ };
+ pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */
+ abilis,function = "stc0";
+ };
+ pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */
+ abilis,function = "stc1";
+ };
+ /* Port 6 */
+ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
+ abilis,function = "mop";
+ };
+ pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+ abilis,function = "mos0";
+ };
+ pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+ abilis,function = "mos1";
+ };
+ pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+ abilis,function = "mos2";
+ };
+ pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+ abilis,function = "mos3";
+ };
+ /* Port 7 */
+ pctl_uart0: pctl-uart0 { /* UART 0 */
+ abilis,function = "uart0";
+ };
+ pctl_uart1: pctl-uart1 { /* UART 1 */
+ abilis,function = "uart1";
+ };
+ pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
+ abilis,function = "gpiol";
+ };
+ pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
+ abilis,function = "gpiom";
+ };
+ /* Port 8 */
+ pctl_spi3: pctl-spi3 {
+ abilis,function = "spi3";
+ };
+ pctl_jtag: pctl-jtag {
+ abilis,function = "jtag";
+ };
+ /* Port 9 */
+ pctl_spi1: pctl-spi1 {
+ abilis,function = "spi1";
+ };
+ pctl_gpio_n: pctl-gpio-n {
+ abilis,function = "gpion";
+ };
+ /* Unmuxed GPIOs */
+ pctl_gpio_b: pctl-gpio-b {
+ abilis,function = "gpiob";
+ };
+ pctl_gpio_d: pctl-gpio-d {
+ abilis,function = "gpiod";
+ };
+ pctl_gpio_f: pctl-gpio-f {
+ abilis,function = "gpiof";
+ };
+ pctl_gpio_h: pctl-gpio-h {
+ abilis,function = "gpioh";
+ };
+ pctl_gpio_i: pctl-gpio-i {
+ abilis,function = "gpioi";
+ };
+ };
+
+ gpioa: gpio@ff140000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff140000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioa";
+ };
+ gpiob: gpio@ff141000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff141000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiob";
+ };
+ gpioc: gpio@ff142000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff142000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioc";
+ };
+ gpiod: gpio@ff143000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff143000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiod";
+ };
+ gpioe: gpio@ff144000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff144000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioe";
+ };
+ gpiof: gpio@ff145000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff145000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiof";
+ };
+ gpiog: gpio@ff146000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff146000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <3>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiog";
+ };
+ gpioh: gpio@ff147000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff147000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <2>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioh";
+ };
+ gpioi: gpio@ff148000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff148000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <12>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioi";
+ };
+ gpioj: gpio@ff149000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff149000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <32>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpioj";
+ };
+ gpiok: gpio@ff14a000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14a000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <22>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiok";
+ };
+ gpiol: gpio@ff14b000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14b000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiol";
+ };
+ gpiom: gpio@ff14c000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14c000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <4>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpiom";
+ };
+ gpion: gpio@ff14d000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 2>;
+ reg = <0xff14d000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ abilis,ngpio = <5>;
+ gpio-ranges = <&iomux 0 0 0>;
+ gpio-ranges-group-names = "gpion";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
new file mode 100644
index 0000000000..d11b790f8b
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB101 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb101.dtsi"
+
+/ {
+ model = "abilis,tb101";
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+ };
+
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x08000000>; /* 128M */
+ };
+
+ soc100 {
+ uart@ff100000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pctl_uart0>;
+ };
+ ethernet@fe100000 {
+ phy-mode = "rgmii";
+ };
+
+ i2c0: i2c@ff120000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c1: i2c@ff121000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c2: i2c@ff122000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c3: i2c@ff123000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+ i2c4: i2c@ff124000 {
+ i2c-sda-hold-time-ns = <432>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ power {
+ label = "Power";
+ gpios = <&gpioi 0 0>;
+ linux,default-trigger = "default-on";
+ };
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&gpioi 1 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ led2 {
+ label = "LED2";
+ gpios = <&gpioi 2 0>;
+ default-state = "off";
+ };
+ led3 {
+ label = "LED3";
+ gpios = <&gpioi 3 0>;
+ default-state = "off";
+ };
+ led4 {
+ label = "LED4";
+ gpios = <&gpioi 4 0>;
+ default-state = "off";
+ };
+ led5 {
+ label = "LED5";
+ gpios = <&gpioi 5 0>;
+ default-state = "off";
+ };
+ led6 {
+ label = "LED6";
+ gpios = <&gpioi 6 0>;
+ default-state = "off";
+ };
+ led7 {
+ label = "LED7";
+ gpios = <&gpioi 7 0>;
+ default-state = "off";
+ };
+ led8 {
+ label = "LED8";
+ gpios = <&gpioi 8 0>;
+ default-state = "off";
+ };
+ led9 {
+ label = "LED9";
+ gpios = <&gpioi 9 0>;
+ default-state = "off";
+ };
+ led10 {
+ label = "LED10";
+ gpios = <&gpioi 10 0>;
+ default-state = "off";
+ };
+ led11 {
+ label = "LED11";
+ gpios = <&gpioi 11 0>;
+ default-state = "off";
+ };
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
new file mode 100644
index 0000000000..aa62619f21
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB10X SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+
+/ {
+ compatible = "abilis,arc-tb10x";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ };
+ };
+
+ /* TIMER0 with interrupt for clockevent */
+ timer0 {
+ compatible = "snps,arc-timer";
+ interrupts = <3>;
+ interrupt-parent = <&intc>;
+ clocks = <&cpu_clk>;
+ };
+
+ /* TIMER1 for free running clocksource */
+ timer1 {
+ compatible = "snps,arc-timer";
+ clocks = <&cpu_clk>;
+ };
+
+ soc100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ ranges = <0xfe000000 0xfe000000 0x02000000
+ 0x000f0000 0x000f0000 0x00010000>;
+ compatible = "abilis,tb10x", "simple-bus";
+
+ pll0: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "pll0";
+ };
+ cpu_clk: clkdiv_cpu {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clocks = <&pll0>;
+ clock-output-names = "cpu_clk";
+ };
+ ahb_clk: clkdiv_ahb {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clocks = <&pll0>;
+ clock-output-names = "ahb_clk";
+ };
+
+ iomux: iomux@ff10601c {
+ compatible = "abilis,tb10x-iomux";
+ #gpio-range-cells = <3>;
+ reg = <0xff10601c 0x4>;
+ };
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ tb10x_ictl: pic@fe002000 {
+ compatible = "abilis,tb10x-ictl";
+ reg = <0xfe002000 0x20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29 30 31>;
+ };
+
+ uart@ff100000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xff100000 0x100>;
+ clock-frequency = <166666666>;
+ interrupts = <25 8>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupt-parent = <&tb10x_ictl>;
+ };
+ ethernet@fe100000 {
+ compatible = "snps,dwmac-3.70a","snps,dwmac";
+ reg = <0xfe100000 0x1058>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <6 8>;
+ interrupt-names = "macirq";
+ clocks = <&ahb_clk>;
+ clock-names = "stmmaceth";
+ };
+ dma@fe000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xfe000000 0x400>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <14 8>;
+ dma-channels = <6>;
+ dma-requests = <0>;
+ dma-masters = <1>;
+ #dma-cells = <3>;
+ chan_allocation_order = <0>;
+ chan_priority = <1>;
+ block_size = <0x7ff>;
+ data-width = <4>;
+ clocks = <&ahb_clk>;
+ clock-names = "hclk";
+ multi-block = <1 1 1 1 1 1>;
+ };
+
+ i2c0: i2c@ff120000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xff120000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 8>;
+ clocks = <&ahb_clk>;
+ };
+ i2c1: i2c@ff121000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xff121000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 8>;
+ clocks = <&ahb_clk>;
+ };
+ i2c2: i2c@ff122000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xff122000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 8>;
+ clocks = <&ahb_clk>;
+ };
+ i2c3: i2c@ff123000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xff123000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 8>;
+ clocks = <&ahb_clk>;
+ };
+ i2c4: i2c@ff124000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xff124000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 8>;
+ clocks = <&ahb_clk>;
+ };
+
+ spi0: spi@fe010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "abilis,tb100-spi";
+ num-cs = <1>;
+ reg = <0xfe010000 0x20>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <26 8>;
+ clocks = <&ahb_clk>;
+ };
+ spi1: spi@fe011000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "abilis,tb100-spi";
+ num-cs = <2>;
+ reg = <0xfe011000 0x20>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <10 8>;
+ clocks = <&ahb_clk>;
+ };
+
+ tb10x_tsm: tb10x-tsm@ff316000 {
+ compatible = "abilis,tb100-tsm";
+ reg = <0xff316000 0x400>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <17 8>;
+ output-clkdiv = <4>;
+ global-packet-delay = <0x21>;
+ port-packet-delay = <0>;
+ };
+ tb10x_stream_proc: tb10x-stream-proc {
+ compatible = "abilis,tb100-streamproc";
+ reg = <0xfff00000 0x200>,
+ <0x000f0000 0x10000>,
+ <0xfff00200 0x105>,
+ <0xff10600c 0x1>,
+ <0xfe001018 0x1>;
+ reg-names = "mbox",
+ "sp_iccm",
+ "mbox_irq",
+ "cpuctrl",
+ "a6it_int_force";
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <20 2>, <19 2>;
+ interrupt-names = "cmd_irq", "event_irq";
+ };
+ tb10x_mdsc0: tb10x-mdscr@ff300000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xff300000 0x7000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_mscr0: tb10x-mdscr@ff307000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xff307000 0x7000>;
+ };
+ tb10x_scr0: tb10x-mdscr@ff30e000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xff30e000 0x4000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_scr1: tb10x-mdscr@ff312000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xff312000 0x4000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_wfb: tb10x-wfb@ff319000 {
+ compatible = "abilis,tb100-wfb";
+ reg = <0xff319000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <16 8>;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
new file mode 100644
index 0000000000..2a151607b0
--- /dev/null
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device tree for AXC001 770D/EM6/AS221 CPU card
+ * Note that this file only supports the 770D CPU
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpu_card {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <750000000>;
+ };
+
+ input_clk: input-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ };
+
+ core_intc: arc700-intc@cpu {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ /*
+ * This GPIO block ORs all interrupts on the CPU card (creg, ...)
+ * to uplink only one IRQ to the ARC core intc.
+ */
+ dw-apb-gpio@2000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = < 0x2000 0x80 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ictl_intc: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <30>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&core_intc>;
+ interrupts = <15>;
+ };
+ };
+
+ debug_uart: dw-apb-uart@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <33333000>;
+ interrupt-parent = <&ictl_intc>;
+ interrupts = <19 4>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
+ };
+
+ /*
+ * This INTC is actually connected to DW APB GPIO
+ * which acts as a wire between MB INTC and CPU INTC.
+ * GPIO INTC is configured in platform init code
+ * and here we mimic direct connection from MB INTC to
+ * CPU INTC, thus we set "interrupts = <7>" instead of
+ * "interrupts = <12>"
+ *
+ * This intc actually resides on MB, but we move it here to
+ * avoid duplicating the MB dtsi file, given that the IRQ from
+ * this intc to the cpu intc differs between axs101 and axs103.
+ */
+ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ interrupts = < 7 >;
+ };
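+ /*
+ * Consumers on the mainboard then just use one-cell specifiers
+ * with interrupt-parent = <&mb_intc> (see axs10x_mb.dtsi), e.g.
+ * the MB uart@20000 with "interrupts = <17>".
+ */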
+
+ memory {
+ device_type = "memory";
+ /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x1b000000>; /* (512 - 32) MiB */
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /*
+ * We simply move the frame buffer area to the very end of
+ * available DDR. Even though on ARC770 there's no strict
+ * requirement for the frame buffer to be in any particular
+ * location, this allows us to use the same base board DT
+ * node for the ARC PGU as for ARC HS38.
+ */
+ frame_buffer: frame_buffer@9e000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9e000000 0x0 0x2000000>;
+ no-map;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
new file mode 100644
index 0000000000..3434c8131e
--- /dev/null
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x UP configuration
+ */
+
+/include/ "skeleton_hs.dtsi"
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpu_card {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
+
+ input_clk: input-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ };
+
+ core_clk: core-clk@80 {
+ compatible = "snps,axs10x-arc-pll-clock";
+ reg = <0x80 0x10>, <0x100 0x10>;
+ #clock-cells = <0>;
+ clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 90MHz.
+ * It will be applied at the core pll driver probing
+ * on early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <90000000>;
+ };
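+ /*
+ * assigned-clocks/assigned-clock-rates is generic clk framework
+ * behaviour: the rate is applied when the provider probes. A
+ * board .dts could override it the same way; the rate below is
+ * purely illustrative:
+ *
+ *	&core_clk {
+ *		assigned-clock-rates = <75000000>;
+ *	};
+ */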
+
+ core_intc: archs-intc@cpu {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ /*
+ * this GPIO block ORs all interrupts on CPU card (creg,..)
+ * to uplink only 1 IRQ to ARC core intc
+ */
+ dw-apb-gpio@2000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = < 0x2000 0x80 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ictl_intc: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <30>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&core_intc>;
+ interrupts = <25>;
+ };
+ };
+
+ debug_uart: dw-apb-uart@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <33333000>;
+ interrupt-parent = <&ictl_intc>;
+ interrupts = <2 4>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupt-parent = <&core_intc>;
+ interrupts = <20>;
+ };
+ };
+
+ /*
+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+ * it via overlay because peripherals defined in axs10x_mb.dtsi are
+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+ * only the AXS103 board has HW-coherent DMA peripherals).
+ * We don't need to mark pgu@17000 as dma-coherent because it uses
+ * an external DMA buffer located outside of the IOC aperture.
+ */
+ axs10x_mb {
+ ethernet@18000 {
+ dma-coherent;
+ };
+
+ usb@40000 {
+ dma-coherent;
+ };
+
+ usb@60000 {
+ dma-coherent;
+ };
+
+ mmc@15000 {
+ dma-coherent;
+ };
+ };
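+ /*
+ * These fragments take effect because the board .dts (e.g.
+ * axs103.dts) includes this file together with axs10x_mb.dtsi and
+ * dtc merges nodes sharing the same path; "axs10x_mb {
+ * ethernet@18000 {...} }" here thus just adds dma-coherent to the
+ * node defined in the MB file.
+ */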
+
+ /*
+ * The DW APB ICTL intc on MB is connected to CPU intc via a
+ * DT "invisible" DW APB GPIO block, configured to simply pass thru
+ * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+ *
+ * So here we mimic a direct connection betwen them, ignoring the
+ * ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
+ * instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
+ *
+ * This intc actually resides on MB, but we move it here to
+ * avoid duplicating the MB dtsi file, given that the IRQ from
+ * this intc to the cpu intc differs between axs101 and axs103.
+ */
+ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ interrupts = < 24 >;
+ };
+
+ memory {
+ device_type = "memory";
+ /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */
+ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /*
+ * Move frame buffer out of IOC aperture (0x8z-0xaz).
+ */
+ frame_buffer: frame_buffer@be000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xbe000000 0x0 0x2000000>;
+ no-map;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
new file mode 100644
index 0000000000..67556f4b70
--- /dev/null
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014, 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x2 (Dual Core) with IDU intc
+ */
+
+/include/ "skeleton_hs_idu.dtsi"
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpu_card {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
+
+ input_clk: input-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ };
+
+ core_clk: core-clk@80 {
+ compatible = "snps,axs10x-arc-pll-clock";
+ reg = <0x80 0x10>, <0x100 0x10>;
+ #clock-cells = <0>;
+ clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 100MHz.
+ * It will be applied at the core pll driver probing
+ * on early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <100000000>;
+ };
+
+ core_intc: archs-intc@cpu {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ idu_intc: idu-interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ #interrupt-cells = <1>;
+ };
+
+ /*
+ * This GPIO block ORs all interrupts on the CPU card (creg, ...)
+ * to uplink only one IRQ to the ARC core intc.
+ */
+ dw-apb-gpio@2000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = < 0x2000 0x80 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ictl_intc: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <30>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <1>;
+ };
+ };
+
+ debug_uart: dw-apb-uart@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <33333000>;
+ interrupt-parent = <&ictl_intc>;
+ interrupts = <2 4>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupt-parent = <&core_intc>;
+ interrupts = <20>;
+ };
+ };
+
+ /*
+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+ * it via overlay because peripherals defined in axs10x_mb.dtsi are
+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+ * only the AXS103 board has HW-coherent DMA peripherals).
+ * We don't need to mark pgu@17000 as dma-coherent because it uses
+ * an external DMA buffer located outside of the IOC aperture.
+ */
+ axs10x_mb {
+ ethernet@18000 {
+ dma-coherent;
+ };
+
+ usb@40000 {
+ dma-coherent;
+ };
+
+ usb@60000 {
+ dma-coherent;
+ };
+
+ mmc@15000 {
+ dma-coherent;
+ };
+ };
+
+ /*
+ * This INTC is actually connected to DW APB GPIO
+ * which acts as a wire between MB INTC and CPU INTC.
+ * GPIO INTC is configured in platform init code
+ * and here we mimic direct connection from MB INTC to
+ * CPU INTC, thus we set "interrupts = <0>" instead of
+ * "interrupts = <12>"
+ *
+ * This intc actually resides on MB, but we move it here to
+ * avoid duplicating the MB dtsi file, given that the IRQ from
+ * this intc to the cpu intc differs between axs101 and axs103.
+ */
+ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
+ interrupt-controller;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <0>;
+ };
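+ /*
+ * With the IDU in the path, "interrupts = <0>" is IDU common irq
+ * 0; on ARC HS common irqs are presumably distributed onto core
+ * irqs starting at 24, which matches the "interrupts = <24>" used
+ * in the UP axc003.dtsi.
+ */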
+
+ memory {
+ device_type = "memory";
+ /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */
+ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /*
+ * Move frame buffer out of IOC aperture (0x8z-0xaz).
+ */
+ frame_buffer: frame_buffer@be000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xbe000000 0x0 0x2000000>;
+ no-map;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts
new file mode 100644
index 0000000000..c4cfc5f4f4
--- /dev/null
+++ b/arch/arc/boot/dts/axs101.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC AXS101 S/W development platform
+ */
+/dts-v1/;
+
+/include/ "axc001.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+ model = "snps,axs101";
+ compatible = "snps,axs101", "snps,arc-sdp";
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 print-fatal-signals=1";
+ };
+};
diff --git a/arch/arc/boot/dts/axs103.dts b/arch/arc/boot/dts/axs103.dts
new file mode 100644
index 0000000000..16ccb7ba7a
--- /dev/null
+++ b/arch/arc/boot/dts/axs103.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device Tree for AXS103 SDP with AXS10X Main Board and
+ * AXC003 FPGA Card (with UP bitfile)
+ */
+/dts-v1/;
+
+/include/ "axc003.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+ model = "snps,axs103";
+ compatible = "snps,axs103", "snps,arc-sdp";
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=ttyS3,115200n8 debug print-fatal-signals=1";
+ };
+};
diff --git a/arch/arc/boot/dts/axs103_idu.dts b/arch/arc/boot/dts/axs103_idu.dts
new file mode 100644
index 0000000000..a934b92a8c
--- /dev/null
+++ b/arch/arc/boot/dts/axs103_idu.dts
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device Tree for AXS103 SDP with AXS10X Main Board and
+ * AXC003 FPGA Card (with SMP bitfile)
+ */
+/dts-v1/;
+
+/include/ "axc003_idu.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+ model = "snps,axs103-smp";
+ compatible = "snps,axs103", "snps,arc-sdp";
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 print-fatal-signals=1 consoleblank=0";
+ };
+};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
new file mode 100644
index 0000000000..b644353853
--- /dev/null
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for peripherals on the AXS10x mainboard
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/ {
+ aliases {
+ ethernet = &gmac;
+ };
+
+ axs10x_mb {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
+ interrupt-parent = <&mb_intc>;
+
+ creg_rst: reset-controller@11220 {
+ compatible = "snps,axs10x-reset";
+ #reset-cells = <1>;
+ reg = <0x11220 0x4>;
+ };
+
+ i2sclk: i2sclk@100a0 {
+ compatible = "snps,axs10x-i2s-pll-clock";
+ reg = <0x100a0 0x10>;
+ clocks = <&i2spll_clk>;
+ #clock-cells = <0>;
+ };
+
+ clocks {
+ i2spll_clk: i2spll_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <27000000>;
+ #clock-cells = <0>;
+ };
+
+ i2cclk: i2cclk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
+ apbclk: apbclk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
+ mmcclk: mmcclk {
+ compatible = "fixed-clock";
+ /*
+ * The DW SDIO controller has an external ciu clock divider
+ * controlled via a register in the SDIO IP. By default it
+ * divides sdio_ref_clk (which comes from the CGU) by 16,
+ * so the default mmcclk clock (which comes to sdk_in) is
+ * 25000000 Hz.
+ */
+ clock-frequency = <25000000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ pguclk: pguclk@10080 {
+ compatible = "snps,axs10x-pgu-pll-clock";
+ reg = <0x10080 0x10>, <0x110 0x10>;
+ #clock-cells = <0>;
+ clocks = <&input_clk>;
+ };
+
+ gmac: ethernet@18000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dwmac";
+ reg = < 0x18000 0x2000 >;
+ interrupts = < 4 >;
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ snps,pbl = < 32 >;
+ snps,multicast-filter-bins = <256>;
+ clocks = <&apbclk>;
+ clock-names = "stmmaceth";
+ max-speed = <100>;
+ resets = <&creg_rst 5>;
+ reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+ };
+
+ usb@40000 {
+ compatible = "generic-ehci";
+ reg = < 0x40000 0x100 >;
+ interrupts = < 8 >;
+ };
+
+ usb@60000 {
+ compatible = "generic-ohci";
+ reg = < 0x60000 0x100 >;
+ interrupts = < 8 >;
+ };
+
+ /*
+ * According to the DW Mobile Storage databook, the "Hold
+ * Register" must be used if the card is enumerated in SDR12
+ * or SDR25 mode.
+ *
+ * Utilization of the "Hold Register" is already implemented
+ * via dw_mci_pltfm_prepare_command(), which in turn gets
+ * used through the dw_mci_drv_data->prepare_command callback.
+ * This callback is used on the Altera SoCFPGA platform, so
+ * we may reuse it by declaring ourselves compatible with
+ * their "altr,socfpga-dw-mshc".
+ *
+ * Most probably "Hold Register" utilization is a platform-
+ * independent requirement, which means a single unified
+ * "snps,dw-mshc" should be enough for all users of DW MMC
+ * once dw_mci_pltfm_prepare_command() is used in generic
+ * platform code.
+ */
+ mmc@15000 {
+ compatible = "altr,socfpga-dw-mshc";
+ reg = < 0x15000 0x400 >;
+ fifo-depth = < 16 >;
+ card-detect-delay = < 200 >;
+ clocks = <&apbclk>, <&mmcclk>;
+ clock-names = "biu", "ciu";
+ interrupts = < 7 >;
+ bus-width = < 4 >;
+ };
+
+ uart@20000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x20000 0x100>;
+ clock-frequency = <33333333>;
+ interrupts = <17>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart@21000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x21000 0x100>;
+ clock-frequency = <33333333>;
+ interrupts = <18>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ /* UART muxed with USB data port (ttyS3) */
+ uart@22000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x22000 0x100>;
+ clock-frequency = <33333333>;
+ interrupts = <19>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ i2c@1d000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x1d000 0x100>;
+ clock-frequency = <400000>;
+ clocks = <&i2cclk>;
+ interrupts = <14>;
+ };
+
+ i2s: i2s@1e000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x1e000 0x100>;
+ clocks = <&i2sclk 0>;
+ clock-names = "i2sclk";
+ interrupts = <15>;
+ #sound-dai-cells = <0>;
+ };
+
+ i2c@1f000 {
+ compatible = "snps,designware-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1f000 0x100>;
+ clock-frequency = <400000>;
+ clocks = <&i2cclk>;
+ interrupts = <16>;
+
+ adv7511: adv7511@39 {
+ compatible = "adi,adv7511";
+ reg = <0x39>;
+ interrupts = <23>;
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,clock-delay = <0x03>;
+ #sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* RGB/YUV input */
+ port@0 {
+ reg = <0>;
+ adv7511_input: endpoint {
+ remote-endpoint = <&pgu_output>;
+ };
+ };
+
+ /* HDMI output */
+ port@1 {
+ reg = <1>;
+ adv7511_output: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c01";
+ reg = <0x54>;
+ pagesize = <0x8>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c04";
+ reg = <0x57>;
+ pagesize = <0x8>;
+ };
+ };
+
+ hdmi0: connector {
+ compatible = "hdmi-connector";
+ type = "a";
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&adv7511_output>;
+ };
+ };
+ };
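+ /*
+ * Display pipeline, wired via of_graph remote-endpoint pairs:
+ * pgu@17000 (pgu_output) -> adv7511 port@0 (RGB in) ->
+ * adv7511 port@1 (HDMI out) -> hdmi_connector_in above.
+ */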
+
+ gpio0: gpio@13000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x13000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio0_banka: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+
+ gpio0_bankb: gpio-controller@1 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ reg = <1>;
+ };
+
+ gpio0_bankc: gpio-controller@2 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ reg = <2>;
+ };
+ };
+
+ gpio1: gpio@14000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x14000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio1_banka: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <30>;
+ reg = <0>;
+ };
+
+ gpio1_bankb: gpio-controller@1 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <10>;
+ reg = <1>;
+ };
+
+ gpio1_bankc: gpio-controller@2 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ reg = <2>;
+ };
+ };
+
+ pgu@17000 {
+ compatible = "snps,arcpgu";
+ reg = <0x17000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
+ memory-region = <&frame_buffer>;
+ port {
+ pgu_output: endpoint {
+ remote-endpoint = <&adv7511_input>;
+ };
+ };
+ };
+
+ sound_playback {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "AXS10x HDMI Audio";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,cpu {
+ sound-dai = <&i2s>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&adv7511>;
+ };
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/haps_hs.dts b/arch/arc/boot/dts/haps_hs.dts
new file mode 100644
index 0000000000..76ad527a08
--- /dev/null
+++ b/arch/arc/boot/dts/haps_hs.dts
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton_hs.dtsi"
+
+/ {
+ model = "snps,zebu_hs";
+ compatible = "snps,zebu_hs";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&core_intc>;
+
+ memory {
+ device_type = "memory";
+ /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
+ 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
+ };
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Only the peripheral space at the end of low mem is accessible here;
+    entries are <bus addr, parent bus addr (2 cells), size> */
+ ranges = <0x80000000 0x0 0x80000000 0x80000000>;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ };
+
+ core_intc: interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns16550a";
+ reg = <0xf0000000 0x2000>;
+ interrupts = <24>;
+ clock-frequency = <50000000>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupts = <20>;
+ };
+
+ virtio0: virtio@f0100000 {
+ compatible = "virtio,mmio";
+ reg = <0xf0100000 0x2000>;
+ interrupts = <31>;
+ };
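+ /*
+ * Four more identical virtio-mmio transports follow: each is a
+ * 0x2000 register window plus its own irq, which the host side
+ * of the HAPS setup is expected to back with virtio devices.
+ */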
+
+ virtio1: virtio@f0102000 {
+ compatible = "virtio,mmio";
+ reg = <0xf0102000 0x2000>;
+ interrupts = <32>;
+ };
+
+ virtio2: virtio@f0104000 {
+ compatible = "virtio,mmio";
+ reg = <0xf0104000 0x2000>;
+ interrupts = <33>;
+ };
+
+ virtio3: virtio@f0106000 {
+ compatible = "virtio,mmio";
+ reg = <0xf0106000 0x2000>;
+ interrupts = <34>;
+ };
+
+ virtio4: virtio@f0108000 {
+ compatible = "virtio,mmio";
+ reg = <0xf0108000 0x2000>;
+ interrupts = <35>;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/haps_hs_idu.dts b/arch/arc/boot/dts/haps_hs_idu.dts
new file mode 100644
index 0000000000..738c76cd07
--- /dev/null
+++ b/arch/arc/boot/dts/haps_hs_idu.dts
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton_hs_idu.dtsi"
+
+/ {
+ model = "snps,zebu_hs-smp";
+ compatible = "snps,zebu_hs";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&core_intc>;
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MiB */
+ };
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>; /* 50 MHz */
+ };
+
+ core_intc: interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ idu_intc: idu-interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns16550a";
+ reg = <0xf0000000 0x2000>;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <0>;
+ clock-frequency = <50000000>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupts = <20>;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
new file mode 100644
index 0000000000..6691f42550
--- /dev/null
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device Tree for ARC HS Development Kit
+ */
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>
+
+/ {
+ model = "snps,hsdk";
+ compatible = "snps,hsdk";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
+ };
+
+ aliases {
+ ethernet = &gmac;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <0>;
+ clocks = <&core_clk>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <1>;
+ clocks = <&core_clk>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <2>;
+ clocks = <&core_clk>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <3>;
+ clocks = <&core_clk>;
+ };
+ };
+
+ input_clk: input-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "5v0-supply";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ cpu_intc: cpu-interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ idu_intc: idu-interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpu_intc>;
+ };
+
+ arcpct: pct {
+ compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
+ };
+
+ /* TIMER0 with interrupt for clockevent */
+ timer {
+ compatible = "snps,arc-timer";
+ interrupts = <16>;
+ interrupt-parent = <&cpu_intc>;
+ clocks = <&core_clk>;
+ };
+
+ /* 64-bit Global Free Running Counter */
+ gfrc {
+ compatible = "snps,archs-timer-gfrc";
+ clocks = <&core_clk>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&idu_intc>;
+
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
+
+ cgu_rst: reset-controller@8a0 {
+ compatible = "snps,hsdk-reset";
+ #reset-cells = <1>;
+ reg = <0x8a0 0x4>, <0xff0 0x4>;
+ };
+
+ core_clk: core-clk@0 {
+ compatible = "snps,hsdk-core-pll-clock";
+ reg = <0x00 0x10>, <0x14b8 0x4>;
+ #clock-cells = <0>;
+ clocks = <&input_clk>;
+
+ /*
+ * Set initial core pll output frequency to 1GHz.
+ * It will be applied at the core pll driver probing
+ * on early boot.
+ */
+ assigned-clocks = <&core_clk>;
+ assigned-clock-rates = <1000000000>;
+ };
+
+ serial: serial@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <33330000>;
+ interrupts = <6>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ gmacclk: gmacclk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ mmcclk_ciu: mmcclk-ciu {
+ compatible = "fixed-clock";
+ /*
+ * The DW SDIO controller has an external ciu clock divider
+ * controlled via a register in the SDIO IP. Due to its
+ * unexpected default value (it should divide by 1 but it
+ * divides by 8) the SDIO IP uses the wrong clock and is
+ * unstable (see STAR 9001204800).
+ * We switched to the minimum possible divisor (div-by-2) in
+ * HSDK platform code, so as a temporary fix the clock
+ * frequency here is set to 50000000 Hz until the DW SDIO
+ * driver itself is fixed.
+ */
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
+ mmcclk_biu: mmcclk-biu {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ gpu_core_clk: gpu-core-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ gpu_dma_clk: gpu-dma-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ gpu_cfg_clk: gpu-cfg-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <200000000>;
+ #clock-cells = <0>;
+ };
+
+ dmac_core_clk: dmac-core-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ dmac_cfg_clk: dmac-gpu-cfg-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <200000000>;
+ #clock-cells = <0>;
+ };
+
+ gmac: ethernet@8000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dwmac";
+ reg = <0x8000 0x2000>;
+ interrupts = <10>;
+ interrupt-names = "macirq";
+ phy-mode = "rgmii-id";
+ snps,pbl = <32>;
+ snps,multicast-filter-bins = <256>;
+ clocks = <&gmacclk>;
+ clock-names = "stmmaceth";
+ phy-handle = <&phy0>;
+ resets = <&cgu_rst HSDK_ETH_RESET>;
+ reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+ dma-coherent;
+
+ tx-fifo-depth = <4096>;
+ rx-fifo-depth = <4096>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
+ reg = <0>;
+ };
+ };
+ };
+
+ usb@60000 {
+ compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
+ reg = <0x60000 0x100>;
+ interrupts = <15>;
+ resets = <&cgu_rst HSDK_USB_RESET>;
+ dma-coherent;
+ };
+
+ usb@40000 {
+ compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
+ reg = <0x40000 0x100>;
+ interrupts = <15>;
+ resets = <&cgu_rst HSDK_USB_RESET>;
+ dma-coherent;
+ };
+
+ mmc@a000 {
+ compatible = "altr,socfpga-dw-mshc";
+ reg = <0xa000 0x400>;
+ num-slots = <1>;
+ fifo-depth = <16>;
+ card-detect-delay = <200>;
+ clocks = <&mmcclk_biu>, <&mmcclk_ciu>;
+ clock-names = "biu", "ciu";
+ interrupts = <12>;
+ bus-width = <4>;
+ dma-coherent;
+ };
+
+ spi0: spi@20000 {
+ compatible = "snps,dw-apb-ssi";
+ reg = <0x20000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <16>;
+ num-cs = <2>;
+ reg-io-width = <4>;
+ clocks = <&input_clk>;
+ cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
+ <&creg_gpio 1 GPIO_ACTIVE_LOW>;
+
+ flash@0 {
+ compatible = "sst26wf016b", "jedec,spi-nor";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <4000000>;
+ };
+
+ adc@1 {
+ compatible = "ti,adc108s102";
+ reg = <1>;
+ vref-supply = <&reg_5v0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
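+ /*
+ * In spi0 above, chip selects are driven as GPIOs (active low)
+ * from creg_gpio lines 0/1, matching num-cs = <2>; each child's
+ * reg is its chip-select index, so the flash sits on CS0 and
+ * the ADC on CS1.
+ */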
+
+ creg_gpio: gpio@14b0 {
+ compatible = "snps,creg-gpio-hsdk";
+ reg = <0x14b0 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ };
+
+ gpio: gpio@3000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x3000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio_port_a: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <24>;
+ reg = <0>;
+ };
+ };
+
+ gpu_3d: gpu@90000 {
+ compatible = "vivante,gc";
+ reg = <0x90000 0x4000>;
+ clocks = <&gpu_dma_clk>,
+ <&gpu_cfg_clk>,
+ <&gpu_core_clk>,
+ <&gpu_core_clk>;
+ clock-names = "bus", "reg", "core", "shader";
+ interrupts = <28>;
+ };
+
+ dmac: dmac@80000 {
+ compatible = "snps,axi-dma-1.01a";
+ reg = <0x80000 0x400>;
+ interrupts = <27>;
+ clocks = <&dmac_core_clk>, <&dmac_cfg_clk>;
+ clock-names = "core-clk", "cfgr-clk";
+
+ dma-channels = <4>;
+ snps,dma-masters = <2>;
+ snps,data-width = <3>;
+ snps,block-size = <4096 4096 4096 4096>;
+ snps,priority = <0 1 2 3>;
+ snps,axi-max-burst-len = <16>;
+ };
+ };
+
+ memory@80000000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x40000000>; /* 1 GB lowmem */
+ /* 0x1 0x00000000 0x0 0x40000000>; 1 GB highmem */
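+ /*
+ * Using the second bank would need a kernel built with
+ * CONFIG_HIGHMEM (and, since it sits above 4 GB, most likely
+ * CONFIG_ARC_HAS_PAE40 as well).
+ */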
+ };
+};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
new file mode 100644
index 0000000000..f8832a15e1
--- /dev/null
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ model = "snps,nsim";
+ compatible = "snps,nsim";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&core_intc>;
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <80000000>;
+ };
+
+ core_intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns16550a";
+ reg = <0xf0000000 0x2000>;
+ interrupts = <24>;
+ clock-frequency = <50000000>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
new file mode 100644
index 0000000000..fc207c4a4e
--- /dev/null
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ model = "snps,nsimosci";
+ compatible = "snps,nsimosci";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&core_intc>;
+
+ chosen {
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <20000000>;
+ };
+
+ core_intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns8250";
+ reg = <0xf0000000 0x2000>;
+ interrupts = <11>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
+ reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
+ };
+
+ ps2: ps2@f9000400 {
+ compatible = "snps,arc_ps2";
+ reg = <0xf9000400 0x14>;
+ interrupts = <13>;
+ interrupt-names = "arc_ps2_irq";
+ };
+
+ eth0: ethernet@f0003000 {
+ compatible = "ezchip,nps-mgt-enet";
+ reg = <0xf0003000 0x44>;
+ interrupts = <7>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,arc700-pct";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
new file mode 100644
index 0000000000..71f1f84161
--- /dev/null
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton_hs.dtsi"
+
+/ {
+ model = "snps,nsimosci_hs";
+ compatible = "snps,nsimosci_hs";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&core_intc>;
+
+ chosen {
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <20000000>;
+ };
+
+ core_intc: core-interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns8250";
+ reg = <0xf0000000 0x2000>;
+ interrupts = <24>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
+ reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
+ };
+
+ ps2: ps2@f9000400 {
+ compatible = "snps,arc_ps2";
+ reg = <0xf9000400 0x14>;
+ interrupts = <27>;
+ interrupt-names = "arc_ps2_irq";
+ };
+
+ eth0: ethernet@f0003000 {
+ compatible = "ezchip,nps-mgt-enet";
+ reg = <0xf0003000 0x44>;
+ interrupts = <25>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupts = <20>;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
new file mode 100644
index 0000000000..69d794c59d
--- /dev/null
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+/dts-v1/;
+
+/include/ "skeleton_hs_idu.dtsi"
+
+/ {
+ model = "snps,nsimosci_hs-smp";
+ compatible = "snps,nsimosci_hs";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&core_intc>;
+
+ chosen {
+ /* this is for console on serial */
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <5000000>;
+ };
+
+ core_intc: core-interrupt-controller {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ idu_intc: idu-interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@f0000000 {
+ compatible = "ns8250";
+ reg = <0xf0000000 0x2000>;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <0>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ no-loopback-test = <1>;
+ };
+
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
+ reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
+ };
+
+ ps2: ps2@f9000400 {
+ compatible = "snps,arc_ps2";
+ reg = <0xf9000400 0x14>;
+ interrupts = <3>;
+ interrupt-parent = <&idu_intc>;
+ interrupt-names = "arc_ps2_irq";
+ };
+
+ eth0: ethernet@f0003000 {
+ compatible = "ezchip,nps-mgt-enet";
+ reg = <0xf0003000 0x44>;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <1>;
+ };
+
+ arcpct0: pct {
+ compatible = "snps,archs-pct";
+ #interrupt-cells = <1>;
+ interrupts = <20>;
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644
index 0000000000..ba86b8036a
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ clocks = <&core_clk>;
+ };
+ };
+
+ /* TIMER0 with interrupt for clockevent */
+ timer0 {
+ compatible = "snps,arc-timer";
+ interrupts = <3>;
+ interrupt-parent = <&core_intc>;
+ clocks = <&core_clk>;
+ };
+
+ /* TIMER1 for free running clocksource */
+ timer1 {
+ compatible = "snps,arc-timer";
+ clocks = <&core_clk>;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
new file mode 100644
index 0000000000..8fb49890e8
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <0>;
+ clocks = <&core_clk>;
+ };
+ };
+
+ /* TIMER0 with interrupt for clockevent */
+ timer0 {
+ compatible = "snps,arc-timer";
+ interrupts = <16>;
+ interrupt-parent = <&core_intc>;
+ clocks = <&core_clk>;
+ };
+
+ /* 64-bit Local RTC: preferred clocksource for UP */
+ rtc {
+ compatible = "snps,archs-timer-rtc";
+ clocks = <&core_clk>;
+ };
+
+ /* TIMER1 for free running clocksource: Fallback if rtc not found */
+ timer1 {
+ compatible = "snps,arc-timer";
+ clocks = <&core_clk>;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
new file mode 100644
index 0000000000..75f5c9ecb5
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <0>;
+ clocks = <&core_clk>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <1>;
+ clocks = <&core_clk>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <2>;
+ clocks = <&core_clk>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "snps,archs38";
+ reg = <3>;
+ clocks = <&core_clk>;
+ };
+ };
+
+ /* TIMER0 with interrupt for clockevent */
+ timer0 {
+ compatible = "snps,arc-timer";
+ interrupts = <16>;
+ interrupt-parent = <&core_intc>;
+ clocks = <&core_clk>;
+ };
+
+ /* 64-bit Global Free Running Counter */
+ gfrc {
+ compatible = "snps,archs-timer-gfrc";
+ clocks = <&core_clk>;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
new file mode 100644
index 0000000000..c21d0eb07b
--- /dev/null
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013, 2014 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x UP configuration (VDK version)
+ */
+
+/include/ "skeleton_hs.dtsi"
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_card {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ };
+
+ core_intc: archs-intc@cpu {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ debug_uart: dw-apb-uart@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <2403200>;
+ interrupt-parent = <&core_intc>;
+ interrupts = <19>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ };
+
+ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0xe0012000 0x200 >;
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ interrupts = < 18 >;
+ };
+
+ memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x80000000 0x40000000>;
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512MiB */
+ };
+};
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
new file mode 100644
index 0000000000..4d348853ac
--- /dev/null
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014, 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Device tree for AXC003 CPU card:
+ * HS38x2 (Dual Core) with IDU intc (VDK version)
+ */
+
+/include/ "skeleton_hs_idu.dtsi"
+
+/ {
+ compatible = "snps,arc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_card {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ core_clk: core_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ };
+
+ core_intc: archs-intc@cpu {
+ compatible = "snps,archs-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ idu_intc: idu-interrupt-controller {
+ compatible = "snps,archs-idu-intc";
+ interrupt-controller;
+ interrupt-parent = <&core_intc>;
+ #interrupt-cells = <1>;
+ };
+
+ debug_uart: dw-apb-uart@5000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x5000 0x100>;
+ clock-frequency = <2403200>;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <2>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ };
+
+ mb_intc: interrupt-controller@e0012000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dw-apb-ictl";
+ reg = < 0xe0012000 0x200 >;
+ interrupt-controller;
+ interrupt-parent = <&idu_intc>;
+ interrupts = <0>;
+ };
+
+ memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x80000000 0x40000000>;
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512MiB */
+ };
+};
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
new file mode 100644
index 0000000000..90a412026e
--- /dev/null
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for peripherals on the AXS10x mainboard (VDK version)
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/ {
+ axs10x_mb_vdk {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0xe0000000 0x10000000>;
+ interrupt-parent = <&mb_intc>;
+
+ clocks {
+ apbclk: apbclk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
+ mmcclk: mmcclk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ #clock-cells = <0>;
+ };
+
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+ };
+
+ ethernet@18000 {
+ #interrupt-cells = <1>;
+ compatible = "snps,dwmac";
+ reg = < 0x18000 0x2000 >;
+ interrupts = < 4 >;
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ snps,phy-addr = < 0 >; // VDK model phy address is 0
+ snps,pbl = < 32 >;
+ clocks = <&apbclk>;
+ clock-names = "stmmaceth";
+ };
+
+ usb@40000 {
+ compatible = "generic-ehci";
+ reg = < 0x40000 0x100 >;
+ interrupts = < 8 >;
+ };
+
+ uart@20000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x20000 0x100>;
+ clock-frequency = <2403200>;
+ interrupts = <17>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart@21000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x21000 0x100>;
+ clock-frequency = <2403200>;
+ interrupts = <18>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart@22000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x22000 0x100>;
+ clock-frequency = <2403200>;
+ interrupts = <19>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+/* PGU output directly sent to virtual LCD screen; HDMI controller not modelled */
+ pgu@17000 {
+ compatible = "snps,arcpgu";
+ reg = <0x17000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
+ };
+
+/* VDK has an additional PS/2 keyboard/mouse interface integrated in the LCD screen model */
+ ps2: ps2@17400 {
+ compatible = "snps,arc_ps2";
+ reg = <0x17400 0x14>;
+ interrupts = <5>;
+ interrupt-names = "arc_ps2_irq";
+ };
+
+ mmc@15000 {
+ compatible = "snps,dw-mshc";
+ reg = <0x15000 0x400>;
+ fifo-depth = <1024>;
+ card-detect-delay = <200>;
+ clocks = <&apbclk>, <&mmcclk>;
+ clock-names = "biu", "ciu";
+ interrupts = <7>;
+ bus-width = <4>;
+ };
+ };
+
+ /*
+ * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
+ *
+ * This node is intentionally put outside of the MB above because
+ * it maps areas outside of the MB's 0xez-0xfz range.
+ */
+ uio_ev: uio@d0000000 {
+ compatible = "generic-uio";
+ reg = <0xd0000000 0x2000 0xd1000000 0x2000 0x90000000 0x10000000 0xc0000000 0x10000000>;
+ reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
+ interrupt-parent = <&mb_intc>;
+ interrupts = <23>;
+ };
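+ /*
+ * With compatible = "generic-uio" the regions above are expected
+ * to be exposed to userspace as /dev/uioX mappings by the
+ * uio_pdrv_genirq driver when it is loaded with of_id=generic-uio.
+ */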
+};
diff --git a/arch/arc/boot/dts/vdk_hs38.dts b/arch/arc/boot/dts/vdk_hs38.dts
new file mode 100644
index 0000000000..cddea7eaca
--- /dev/null
+++ b/arch/arc/boot/dts/vdk_hs38.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC HS38 Virtual Development Kit (VDK)
+ */
+/dts-v1/;
+
+/include/ "vdk_axc003.dtsi"
+/include/ "vdk_axs10x_mb.dtsi"
+
+/ {
+ model = "snps,vdk_archs";
+ compatible = "snps,axs103";
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+ };
+};
diff --git a/arch/arc/boot/dts/vdk_hs38_smp.dts b/arch/arc/boot/dts/vdk_hs38_smp.dts
new file mode 100644
index 0000000000..f57d1922ee
--- /dev/null
+++ b/arch/arc/boot/dts/vdk_hs38_smp.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC HS38 Virtual Development Kit, SMP version (VDK)
+ */
+/dts-v1/;
+
+/include/ "vdk_axc003_idu.dtsi"
+/include/ "vdk_axs10x_mb.dtsi"
+
+/ {
+ model = "snps,vdk_archs-smp";
+ compatible = "snps,axs103";
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=640x480-24";
+ };
+};
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
new file mode 100644
index 0000000000..89720d6d7e
--- /dev/null
+++ b/arch/arc/configs/axs101_defconfig
@@ -0,0 +1,104 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS101=y
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+CONFIG_ARC_BUILTIN_DTB_NAME="axs101"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_DRM=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_ARCPGU=m
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
new file mode 100644
index 0000000000..73ec01ed04
--- /dev/null
+++ b/arch/arc/configs/axs103_defconfig
@@ -0,0 +1,102 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="axs103"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
new file mode 100644
index 0000000000..4da0f626fa
--- /dev/null
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -0,0 +1,104 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_ARC_BUILTIN_DTB_NAME="axs103_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_DRM=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_ARCPGU=m
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
new file mode 100644
index 0000000000..8c3ed5d6e6
--- /dev/null
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -0,0 +1,62 @@
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_VIRTIO_BLK=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_MMIO=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
new file mode 100644
index 0000000000..6fc98c1b9b
--- /dev/null
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -0,0 +1,62 @@
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SMP=y
+CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
new file mode 100644
index 0000000000..9e79154b55
--- /dev/null
+++ b/arch/arc/configs/hsdk_defconfig
@@ -0,0 +1,93 @@
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARC_SOC_HSDK=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_LINUX_LINK_BASE=0x90000000
+CONFIG_LINUX_RAM_BASE=0x80000000
+CONFIG_ARC_BUILTIN_DTB_NAME="hsdk"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+CONFIG_MICREL_PHY=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_SNPS_CREG=y
+# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_UDL=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_AXI_DMAC=y
+CONFIG_IIO=y
+CONFIG_TI_ADC108S102=y
+CONFIG_EXT3_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_ECHAINIV=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
new file mode 100644
index 0000000000..51092c39e3
--- /dev/null
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -0,0 +1,59 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
new file mode 100644
index 0000000000..70c17bca49
--- /dev/null
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -0,0 +1,67 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_CYPRESS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
new file mode 100644
index 0000000000..59a3b6642f
--- /dev/null
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -0,0 +1,65 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
new file mode 100644
index 0000000000..1419fc946a
--- /dev/null
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -0,0 +1,74 @@
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+# CONFIG_ARC_TIMERS_64BIT is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_FTRACE=y
+# CONFIG_NET_VENDOR_CADENCE is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
new file mode 100644
index 0000000000..1a68e4beeb
--- /dev/null
+++ b/arch/arc/configs/tb10x_defconfig
@@ -0,0 +1,99 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="tb10x"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio"
+CONFIG_INITRAMFS_ROOT_UID=2100
+CONFIG_INITRAMFS_ROOT_GID=501
+# CONFIG_RD_GZIP is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_ARC_PLAT_TB10X=y
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+CONFIG_HZ=250
+CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
new file mode 100644
index 0000000000..50c3439138
--- /dev/null
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -0,0 +1,94 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
+CONFIG_PREEMPT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_SLRAM=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
new file mode 100644
index 0000000000..6d9e1d9f71
--- /dev/null
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -0,0 +1,100 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+# CONFIG_ARC_TIMERS_64BIT is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
+CONFIG_PREEMPT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_SLRAM=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 0000000000..3c1afa524b
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += extable.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += parport.h
+generic-y += user.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 0000000000..4b13f60fe7
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+/* Build Configuration Registers */
+#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL 0x3F /* ARCv2 Error protection control */
+#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD 0xc7 /* ARCv2 Error protection Build: ECC/Parity */
+#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
+#define ARC_REG_SLC_BCR 0xce
+#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
+#define ARC_REG_AP_BCR 0x76
+#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MPY_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+#define ARC_REG_BPU_BCR 0xc0
+#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_LPB_BUILD 0xE9 /* ARCv2 Loop Buffer Build */
+#define ARC_REG_RTT_BCR 0xF2
+#define ARC_REG_IRQ_BCR 0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9 /* ARCv2 Product revision */
+#define ARC_REG_SMART_BCR 0xFF
+#define ARC_REG_CLUSTER_BCR 0xcf
+#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL 0x488 /* ARCv2 Loop Buffer control */
+#define ARC_REG_FPU_CTRL 0x300
+#define ARC_REG_FPU_STATUS 0x301
+
+/* Status register, common to ARCompact and ARCv2 */
+#define ARC_REG_STATUS32 0x0A
+
+/* status32 Bits Positions */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word (STATUS32) bits */
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#ifdef CONFIG_ISA_ARCOMPACT
+#define ECR_V_MEM_ERR 0x01
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+#define ECR_V_TRAP 0x25
+#else
+#define ECR_V_MEM_ERR 0x01
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x03
+#define ECR_V_ITLB_MISS 0x04
+#define ECR_V_DTLB_MISS 0x05
+#define ECR_V_PROTV 0x06
+#define ECR_V_TRAP 0x09
+#define ECR_V_MISALIGN 0x0d
+#endif
+
+/* DTLB Miss and Protection Violation Cause Codes */
+
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
+
+#define ECR_C_BIT_PROTV_MISALIG_DATA 10
+
+/* Machine Check Cause Code Values */
+#define ECR_C_MCHK_DUP_TLB 0x01
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_EXEC_CTRL 8
+#define AUX_INTR_VEC_BASE 0x25
+#define AUX_VOL 0x5e
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+/*
+ * DSP-related registers
+ * Register names must correspond to the dsp_callee_regs structure field names
+ * for automatic offset calculation in DSP_AUX_SAVE_RESTORE macros.
+ */
+#define ARC_AUX_DSP_BUILD 0x7A
+#define ARC_AUX_ACC0_LO 0x580
+#define ARC_AUX_ACC0_GLO 0x581
+#define ARC_AUX_ACC0_HI 0x582
+#define ARC_AUX_ACC0_GHI 0x583
+#define ARC_AUX_DSP_BFLY0 0x598
+#define ARC_AUX_DSP_CTRL 0x59F
+#define ARC_AUX_DSP_FFT_CTRL 0x59E
+
+#define ARC_AUX_AGU_BUILD 0xCC
+#define ARC_AUX_AGU_AP0 0x5C0
+#define ARC_AUX_AGU_AP1 0x5C1
+#define ARC_AUX_AGU_AP2 0x5C2
+#define ARC_AUX_AGU_AP3 0x5C3
+#define ARC_AUX_AGU_OS0 0x5D0
+#define ARC_AUX_AGU_OS1 0x5D1
+#define ARC_AUX_AGU_MOD0 0x5E0
+#define ARC_AUX_AGU_MOD1 0x5E1
+#define ARC_AUX_AGU_MOD2 0x5E2
+#define ARC_AUX_AGU_MOD3 0x5E3
+
+#ifndef __ASSEMBLY__
+
+#include <soc/arc/aux.h>
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_isa_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
+ pad1:12, ver:8;
+#else
+ unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
+ ldd:1, pad2:4, div_rem:4;
+#endif
+};
+
+struct bcr_uarch_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+ unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_4 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
+ n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
+#else
+ /* DTLB ITLB JES JE JA */
+ unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
+ pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_slc_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:24, way:2, lsz:2, sz:4;
+#else
+ unsigned int sz:4, lsz:2, way:2, pad:24;
+#endif
+};
+
+struct bcr_clust_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
+#else
+ unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
+#endif
+};
+
+struct bcr_volatile {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:4, limit:4, pad:22, order:1, disable:1;
+#else
+ unsigned int disable:1, order:1, pad:22, limit:4, start:4;
+#endif
+};
+
+struct bcr_mpy {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
+#else
+ unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8;
+#endif
+};
+
+struct bcr_iccm_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+struct bcr_iccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
+#else
+ unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
+#endif
+};
+
+struct bcr_dccm_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+struct bcr_dccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
+#else
+ unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
+#endif
+};
+
+/* ARCompact: Both SP and DP FPU BCRs have same format */
+struct bcr_fp_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+struct bcr_fp_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8;
+#else
+ unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15;
+#endif
+};
+
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:21, min:1, num:2, ver:8;
+#else
+ unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
+#include <soc/arc/timers.h>
+
+struct bcr_bpu_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
+#else
+ unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
+#endif
+};
+
+struct bcr_bpu_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8;
+#else
+ unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6;
+#endif
+};
+
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+ unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
+};
+
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+ unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
+};
+
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, entries:8, ver:8;
+#else
+ unsigned int ver:8, entries:8, pad:16;
+#endif
+};
+
+struct bcr_generic {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int info:24, ver:8;
+#else
+ unsigned int ver:8, info:24;
+#endif
+};
+
+static inline int is_isa_arcv2(void)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCV2);
+}
+
+static inline int is_isa_arcompact(void)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
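
Each bcr_* struct above mirrors one 32-bit build-config register, so hardware
probing reduces to reading the aux register and viewing the same 32 bits
through the bitfield layout. A minimal sketch, assuming read_aux_reg() from
<soc/arc/aux.h> (already included by this header) plus the usual kernel
memcpy()/pr_info() helpers; the function name is illustrative only:

    static void arc_show_identity(void)
    {
            struct bcr_identity ident;
            unsigned int val = read_aux_reg(AUX_IDENTITY);

            /* same 32 bits, reinterpreted via the bitfield layout */
            memcpy(&ident, &val, sizeof(ident));

            pr_info("ARC core: family 0x%x cpu_id 0x%x chip_id 0x%x\n",
                    ident.family, ident.cpu_id, ident.chip_id);
    }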
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 0000000000..32a1d3d518
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/asserts.h b/arch/arc/include/asm/asserts.h
new file mode 100644
index 0000000000..108f33be6a
--- /dev/null
+++ b/arch/arc/include/asm/asserts.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_ASSERTS_H
+#define __ASM_ARC_ASSERTS_H
+
+/* Helpers to sanitize config options. */
+
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena);
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena);
+
+/*
+ * Check required config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+ * - warn in case of OPT disabled but corresponding HW exists.
+ */
+#define CHK_OPT_STRICT(opt_name, hw_exists) \
+({ \
+ chk_opt_strict(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+/*
+ * Check optional config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+ */
+#define CHK_OPT_WEAK(opt_name, hw_exists) \
+({ \
+ chk_opt_weak(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+#endif /* __ASM_ARC_ASSERTS_H */
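
These helpers are meant for early platform/CPU setup code, where a Kconfig
knob is checked against a capability bit decoded from a BCR. A hedged usage
sketch; hw_has_llsc is a hypothetical stand-in for whatever capability the
caller has already probed:

    static void chk_core_config(bool hw_has_llsc)
    {
            /* panics if CONFIG_ARC_HAS_LLSC=y but the core lacks LLOCK/SCOND,
             * warns if the core has it but the kernel was built without it */
            CHK_OPT_STRICT(CONFIG_ARC_HAS_LLSC, hw_has_llsc);
    }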
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
new file mode 100644
index 0000000000..5258cb81a1
--- /dev/null
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_LLSC_H
+#define _ASM_ARC_ATOMIC_LLSC_H
+
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#define ATOMIC_OP(op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned int val; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
+ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned int val; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val) \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+ \
+ return val; \
+}
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+
+#define ATOMIC_FETCH_OP(op, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned int val, orig; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[orig], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[orig], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val), \
+ [orig] "=&r" (orig) \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+ \
+ return orig; \
+}
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
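
To make the macro machinery concrete: ATOMIC_OPS(add, add) stamps out three
flavours, and the plain void op expands to roughly the following
(hand-expanded from ATOMIC_OP above, not additional source):

    static inline void arch_atomic_add(int i, atomic_t *v)
    {
            unsigned int val;

            __asm__ __volatile__(
            "1:     llock   %[val], [%[ctr]]        \n" /* load-locked */
            "       add     %[val], %[val], %[i]    \n"
            "       scond   %[val], [%[ctr]]        \n" /* fails if reservation lost */
            "       bnz     1b                      \n" /* retry until scond succeeds */
            : [val] "=&r" (val)
            : [ctr] "r" (&v->counter),
              [i] "ir" (i)
            : "cc", "memory");
    }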
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
new file mode 100644
index 0000000000..89d12a60f8
--- /dev/null
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
+#define _ASM_ARC_ATOMIC_SPLOCK_H
+
+/*
+ * Non-hardware-assisted atomic R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+ /*
+	 * Independent of hardware support, all of the atomic_xxx() APIs need
+	 * to follow the same locking rules to make sure that a "hardware"
+	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+	 * sequence.
+	 *
+	 * Thus atomic_set(), despite being a single insn (and seemingly
+	 * atomic), requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ WRITE_ONCE(v->counter, i);
+ atomic_ops_unlock(flags);
+}
+
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int temp; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int orig; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ orig = v->counter; \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+ \
+ return orig; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
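
As a concrete instance, ATOMIC_FETCH_OP(or, |=, or) above expands to roughly
the following; note that the asm_op argument is unused in this spinlock
variant and exists only to keep the macro signature shared with the LLSC
header:

    static inline int arch_atomic_fetch_or(int i, atomic_t *v)
    {
            unsigned long flags;
            unsigned int orig;

            /* spin lock/unlock provides the needed smp_mb() before/after */
            atomic_ops_lock(flags);
            orig = v->counter;
            v->counter |= i;
            atomic_ops_unlock(flags);

            return orig;
    }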
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 0000000000..592d7fffc2
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#include <asm/atomic-llsc.h>
+#else
+#include <asm/atomic-spinlock.h>
+#endif
+
+/*
+ * 64-bit atomics
+ */
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#else
+#include <asm/atomic64-arcv2.h>
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
new file mode 100644
index 0000000000..9b5791b854
--- /dev/null
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
+ * - The address HAS to be 64-bit aligned
+ */
+
+#ifndef _ASM_ARC_ATOMIC64_ARCV2_H
+#define _ASM_ARC_ATOMIC64_ARCV2_H
+
+typedef struct {
+ s64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(a) { (a) }
+
+static inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ s64 val;
+
+ __asm__ __volatile__(
+ " ldd %0, [%1] \n"
+ : "=r"(val)
+ : "r"(&v->counter));
+
+ return val;
+}
+
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
+{
+ /*
+	 * This could have been a simple assignment in "C" but would need
+	 * explicit volatile. Otherwise gcc optimizers could elide the store,
+	 * which borked the atomic64 self-test.
+	 * In the inline asm version, a memory clobber is needed for the exact
+	 * same reason: to tell gcc about the store.
+	 *
+	 * This however is not needed for the sibling atomic64_add() etc since
+	 * both load/store are explicitly done in inline asm. As long as the
+	 * API is used for each access, gcc has no way to optimize away any
+	 * load/store.
+ */
+ __asm__ __volatile__(
+ " std %0, [%1] \n"
+ :
+ : "r"(a), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
+{ \
+ s64 val; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 val; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+ \
+ return val; \
+}
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 val, orig; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%2] \n" \
+ " " #op1 " %L1, %L0, %L3 \n" \
+ " " #op2 " %H1, %H0, %H3 \n" \
+ " scondd %1, [%2] \n" \
+ " bnz 1b \n" \
+ : "=&r"(orig), "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+ \
+ return orig; \
+}
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+ATOMIC64_OPS(add, add.f, adc)
+ATOMIC64_OPS(sub, sub.f, sbc)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, or, or)
+ATOMIC64_OPS(xor, xor, xor)
+
+#define arch_atomic64_andnot arch_atomic64_andnot
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline s64
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+{
+ s64 prev;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " brne %L0, %L2, 2f \n"
+ " brne %H0, %H2, 2f \n"
+ " scondd %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected), "r"(new)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return prev;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
+{
+ s64 prev;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " scondd %2, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "r"(new)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return prev;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " sub.f %L0, %L0, 1 # w0 - 1, set C on borrow\n"
+ " sub.c %H0, %H0, 1 # if C set, w1 - 1\n"
+ " brlt %H0, 0, 2f \n"
+ " scondd %0, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(val)
+ : "r"(&v->counter)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return val;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 old, temp;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%2] \n"
+ " brne %L0, %L4, 2f # continue to add since v != u \n"
+ " breq.d %H0, %H4, 3f # return since v == u \n"
+ "2: \n"
+ " add.f %L1, %L0, %L3 \n"
+ " adc %H1, %H0, %H3 \n"
+ " scondd %1, [%2] \n"
+ " bnz 1b \n"
+ "3: \n"
+ : "=&r"(old), "=&r" (temp)
+ : "r"(&v->counter), "r"(a), "r"(u)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return old;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+
+#endif
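
The op1/op2 pairs passed to the ATOMIC64_* macros implement one 64-bit
operation as two 32-bit halves: the low-word op sets the carry/borrow flag
(the .f suffix) and the high-word op consumes it. Hand-expanding
ATOMIC64_OP(add, add.f, adc) gives roughly:

    static inline void arch_atomic64_add(s64 a, atomic64_t *v)
    {
            s64 val;

            __asm__ __volatile__(
            "1:                             \n"
            "       llockd  %0, [%1]        \n" /* 64-bit load-locked */
            "       add.f   %L0, %L0, %L2   \n" /* low word, sets carry */
            "       adc     %H0, %H0, %H2   \n" /* high word, adds carry in */
            "       scondd  %0, [%1]        \n"
            "       bnz     1b              \n" /* retry if reservation lost */
            : "=&r"(val)
            : "r"(&v->counter), "ir"(a)
            : "cc", "memory");
    }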
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 0000000000..4637de9e02
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * An explicit barrier is provided by the DMB instruction
+ * - Its operand supports fine grained load/store/load+store semantics
+ * - It ensures that a selected memory operation issued before it completes
+ *   before any subsequent memory operation of the same type
+ * - DMB guarantees SMP as well as local barrier semantics
+ *   (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *   UP: barrier(), SMP: smp_*mb == *mb)
+ * - DSYNC provides DMB plus completion of cache/BPU maintenance ops, hence is
+ *   not needed in the general case. Plus it only provides a full barrier.
+ */
+
+#define mb() asm volatile("dmb 3\n" : : : "memory")
+#define rmb() asm volatile("dmb 1\n" : : : "memory")
+#define wmb() asm volatile("dmb 2\n" : : : "memory")
+
+#else
+
+/*
+ * ARCompact based cores (ARC700) only have the SYNC instruction, which is
+ * super heavyweight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb() asm volatile("sync\n" : : : "memory")
+
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif
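
The DMB operand encodes the barrier type (1 = load, 2 = store, 3 = both),
which is exactly what backs the generic smp_rmb()/smp_wmb()/smp_mb() here. A
minimal sketch of the classic flag-passing pairing these make safe, written
with generic kernel primitives only (illustrative, not ARC-specific code):

    static int payload;
    static int ready;

    void producer(int v)
    {
            payload = v;
            smp_wmb();                      /* dmb 2: payload store first */
            WRITE_ONCE(ready, 1);
    }

    int consumer(void)
    {
            while (!READ_ONCE(ready))
                    cpu_relax();
            smp_rmb();                      /* dmb 1: flag read before payload read */
            return payload;
    }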
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 0000000000..f5a936496f
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * Count the number of zeroes, starting from the MSB
+ * Helper for fls() and friends
+ * This is a pure count, so the (1-32) or (0-31) conventions don't apply
+ * The result can be 0 to 32, based on the number of leading zeroes
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(unsigned int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u))
+ r -= 1;
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned int x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+#else /* CONFIG_ISA_ARCV2 */
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned int x)
+{
+ int n;
+
+ asm volatile(
+ " fls.f %0, %1 \n" /* 0:31; 0(Z) if src 0 */
+ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */
+ : "=r"(n) /* Early clobber not needed */
+ : "r"(x)
+ : "cc");
+
+ return n;
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
+ */
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
+{
+ /* FLS insn has exactly same semantics as the API */
+ return __builtin_arc_fls(x);
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+static inline __attribute__ ((const)) int ffs(unsigned int x)
+{
+ int n;
+
+ asm volatile(
+ " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
+ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */
+ " mov.z %0, 0 \n" /* 31(Z)-> 0 */
+ : "=r"(n) /* Early clobber not needed */
+ : "r"(x)
+ : "cc");
+
+ return n;
+}
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
+{
+ unsigned long n;
+
+ asm volatile(
+ " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
+ " mov.z %0, 0 \n" /* 31(Z)-> 0 */
+ : "=r"(n)
+ : "r"(x)
+ : "cc");
+
+ return n;
+
+}
+
+#endif /* CONFIG_ISA_ARCOMPACT */
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
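+
+/*
+ * Worked example (editorial, assuming 32-bit words): ffz(0x0000FFFF)
+ * inverts to 0xFFFF0000, and __ffs of that is 16 - the first zero bit
+ * of the original value, 0-based.
+ */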
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 0000000000..4c453ba96c
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address);
+void die(const char *str, struct pt_regs *regs, unsigned long address);
+
+#define BUG() do { \
+ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
+ __builtin_trap(); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 0000000000..f0f1fc5d62
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* If cache ($$) line size is not configured, set up a dummy value for rest of kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
+
+/*
+ * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF)
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+#ifndef __ASSEMBLY__
+
+#include <linux/build_bug.h>
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
+
+/* Largest line length for either L1 or L2 is 128 bytes */
+#define SMP_CACHE_BYTES 128
+#define cache_line_size() SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in the buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
+extern int ioc_enable;
+extern unsigned long perip_base, perip_end;
+
+#endif /* !__ASSEMBLY__ */
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIR 0x16
+#define ARC_REG_IC_ENDR 0x17
+#define ARC_REG_IC_IVIL 0x19
+#define ARC_REG_IC_PTAG 0x1E
+#define ARC_REG_IC_PTAG_HI 0x1F
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_DIS 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#define ARC_REG_DC_STARTR 0x4D
+#define ARC_REG_DC_ENDR 0x4E
+#define ARC_REG_DC_PTAG 0x5C
+#define ARC_REG_DC_PTAG_HI 0x5F
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_RGN_OP_INV 0x200
+#define DC_CTRL_RGN_OP_MSK 0x200
+
+/* System-level cache (L2 cache) related Auxiliary registers */
+#define ARC_REG_SLC_CFG 0x901
+#define ARC_REG_SLC_CTRL 0x903
+#define ARC_REG_SLC_FLUSH 0x904
+#define ARC_REG_SLC_INVALIDATE 0x905
+#define ARC_AUX_SLC_IVDL 0x910
+#define ARC_AUX_SLC_FLDL 0x912
+#define ARC_REG_SLC_RGN_START 0x914
+#define ARC_REG_SLC_RGN_START1 0x915
+#define ARC_REG_SLC_RGN_END 0x916
+#define ARC_REG_SLC_RGN_END1 0x917
+
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
+#define SLC_CTRL_IM 0x040
+#define SLC_CTRL_BUSY 0x100
+#define SLC_CTRL_RGN_OP_INV 0x200
+
+/* IO coherency related Auxiliary registers */
+#define ARC_REG_IO_COH_ENABLE 0x500
+#define ARC_IO_COH_ENABLE_BIT BIT(0)
+#define ARC_REG_IO_COH_PARTIAL 0x501
+#define ARC_IO_COH_PARTIAL_BIT BIT(0)
+#define ARC_REG_IO_COH_AP0_BASE 0x508
+#define ARC_REG_IO_COH_AP0_SIZE 0x509
+
+#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 0000000000..bd5b1a9a05
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ), whose absence
+ * was causing gdbserver to not set up breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/shmparam.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#else /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
+ * This works around some PIO based drivers which don't call flush_dcache_page
+ * to record that they dirtied the dcache
+ */
+#define PG_dc_clean PG_arch_1
+
+#define CACHE_COLORS_NUM 4
+#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
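+
+/*
+ * Editorial example, assuming the default 8K pages (PAGE_SHIFT = 13):
+ * CACHE_COLOR(0x10000000) = (0x10000000 >> 13) & 3 = 0x8000 & 3 = 0, while
+ * CACHE_COLOR(0x10002000) = 0x8001 & 3 = 1, so the two pages index into
+ * different cache sets and are not congruent.
+ */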
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+ return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+}
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2) \
+({ \
+ cache_is_vipt_aliasing() ? \
+ (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
+})
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 0000000000..0b485800a3
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have modulo scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The 2 half-words comprising the 32-bit sum are added, any resulting
+ * carry is added back, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned int r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
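+
+/*
+ * Worked example (editorial): for s = 0x12345678 the two halves sum to
+ * 0x1234 + 0x5678 = 0x68AC and the folded result is ~0x68AC = 0x9753.
+ * The code gets there without an explicit carry check: r = 0x56781234
+ * (rotate by 16), s = ~0x12345678 = 0xEDCBA987, s - r = 0x97539753,
+ * and the top half-word 0x9753 is returned.
+ */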
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
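+
+/*
+ * Editorial note on the little-endian "len << 8" trick (assuming len fits
+ * in 16 bits): with len = hi*256 + lo, len << 8 = (hi << 16) | (lo << 8).
+ * The eventual 16-bit fold adds the high half back in, leaving
+ * (lo << 8) | hi, i.e. htons(len), without an explicit byte swap.
+ */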
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 0000000000..e138fde067
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * if (*ptr == @old)
+ * *ptr = @new
+ */
+#define __cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) _prev; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " brne %0, %2, 2f \n" \
+ " scond %3, [%1] \n" \
+ " bnz 1b \n" \
+ "2: \n" \
+ : "=&r"(_prev) /* Early clobber prevent reg reuse */ \
+ : "r"(ptr), /* Not "m": llock only supports reg */ \
+ "ir"(old), \
+ "r"(new) /* Not "ir": scond can't take LIMM */ \
+ : "cc", \
+ "memory"); /* gcc knows memory is clobbered */ \
+ \
+ _prev; \
+})
+
+#define arch_cmpxchg_relaxed(ptr, old, new) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _o_ = (old); \
+ __typeof__(*(ptr)) _n_ = (new); \
+ __typeof__(*(ptr)) _prev_; \
+ \
+ switch (sizeof(*(_p_))) { \
+ case 4: \
+ _prev_ = __cmpxchg(_p_, _o_, _n_); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ _prev_; \
+})
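+
+/*
+ * Editorial sketch of the usual caller pattern (hypothetical increment,
+ * not part of this header): retry until the LLOCK/SCOND backed
+ * compare-and-swap observes an unchanged value.
+ *
+ *	u32 old, new;
+ *	do {
+ *		old = READ_ONCE(*p);
+ *		new = old + 1;
+ *	} while (arch_cmpxchg_relaxed(p, old, new) != old);
+ */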
+
+#else
+
+#define arch_cmpxchg(ptr, old, new) \
+({ \
+ volatile __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _o_ = (old); \
+ __typeof__(*(ptr)) _n_ = (new); \
+ __typeof__(*(ptr)) _prev_; \
+ unsigned long __flags; \
+ \
+ BUILD_BUG_ON(sizeof(_p_) != 4); \
+ \
+ /* \
+ * spin lock/unlock provide the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(__flags); \
+ _prev_ = *_p_; \
+ if (_prev_ == _o_) \
+ *_p_ = _n_; \
+ atomic_ops_unlock(__flags); \
+ _prev_; \
+})
+
+#endif
+
+/*
+ * xchg
+ */
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __arch_xchg(ptr, val) \
+({ \
+ __asm__ __volatile__( \
+ " ex %0, [%1] \n" /* set new value */ \
+ : "+r"(val) \
+ : "r"(ptr) \
+ : "memory"); \
+ _val_; /* get old value */ \
+})
+
+#define arch_xchg_relaxed(ptr, val) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _val_ = (val); \
+ \
+ switch(sizeof(*(_p_))) { \
+ case 4: \
+ _val_ = __arch_xchg(_p_, _val_); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ _val_; \
+})
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+/*
+ * The EX instruction is baseline and present in !LLSC too. But in this
+ * regime it still needs to use the @atomic_ops_lock spinlock to allow
+ * interop with cmpxchg(), which uses the spinlock in !LLSC
+ * (llist.h uses xchg and cmpxchg on the same data)
+ */
+
+#define arch_xchg(ptr, val) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _val_ = (val); \
+ \
+ unsigned long __flags; \
+ \
+ atomic_ops_lock(__flags); \
+ \
+ __asm__ __volatile__( \
+ " ex %0, [%1] \n" \
+ : "+r"(_val_) \
+ : "r"(_p_) \
+ : "memory"); \
+ \
+ atomic_ops_unlock(__flags); \
+ _val_; \
+})
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 0000000000..06be89f6f2
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("gp");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 0000000000..54db798f0a
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm-generic/types.h>
+#include <asm/param.h> /* HZ */
+
+extern unsigned long loops_per_jiffy;
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ " mov lp_count, %0 \n"
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
+ :
+ : "r"(loops)
+ : "lp_count");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically if we multiply and divide a number by same value the
+ * result remains unchanged: In this case, we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Divide by 2^32 is simply a right shift by 32
+ * -We simply need to ensure that the multiply per above eqn happens in
+ * 64-bit precision (if the CPU doesn't support it, gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (u64) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
+
+ __delay(loops);
+}
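+
+/*
+ * Worked numbers (editorial, assuming HZ = 100 and loops_per_jiffy =
+ * 500000, i.e. 50 million delay loops per second): for usecs = 10,
+ * loops = (10 * 4295 * 100 * 500000) >> 32 = 2147500000000 >> 32
+ * ~= 500 loops, which is indeed 10 usec worth at 5e7 loops/sec.
+ */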
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 0000000000..61fb4d7aff
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
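+
+/*
+ * Editorial note: BITS() extracts the inclusive bit range [s, e].
+ * e.g. BITS(0xABCD, 4, 7) = (0xABCD >> 4) & ~((-2) << 3) = 0xABC & 0xF
+ * = 0xC, i.e. the second nibble.
+ */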
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word) FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 21)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+ (BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 25)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
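+
+/*
+ * Editorial example: sign_extend(0x1F0, 9) treats bit 8 as the sign bit;
+ * it is set, so the value is OR-ed with (0xffffffff << 9) = 0xFFFFFE00,
+ * giving 0xFFFFFFF0 = -16 as a 32-bit int.
+ */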
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 0000000000..02431027ed
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/dsp-impl.h b/arch/arc/include/asm/dsp-impl.h
new file mode 100644
index 0000000000..cd5636dfeb
--- /dev/null
+++ b/arch/arc/include/asm/dsp-impl.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_IMPL_H
+#define __ASM_ARC_DSP_IMPL_H
+
+#include <asm/dsp.h>
+
+#define DSP_CTRL_DISABLED_ALL 0
+
+#ifdef __ASSEMBLY__
+
+/* clobbers r5 register */
+.macro DSP_EARLY_INIT
+#ifdef CONFIG_ISA_ARCV2
+ lr r5, [ARC_AUX_DSP_BUILD]
+ bmsk r5, r5, 7
+ breq r5, 0, 1f
+ mov r5, DSP_CTRL_DISABLED_ALL
+ sr r5, [ARC_AUX_DSP_CTRL]
+1:
+#endif
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_SAVE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_KERNEL)
+ /*
+ * Drop any changes to DSP_CTRL made by userspace so userspace won't be
+ * able to break kernel - reset it to DSP_CTRL_DISABLED_ALL value
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#elif defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ /*
+ * Save DSP_CTRL register and reset it to value suitable for kernel
+ * (DSP_CTRL_DISABLED_ALL)
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ aex r10, [ARC_AUX_DSP_CTRL]
+ st r10, [sp, PT_DSP_CTRL]
+
+#endif
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_RESTORE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ ld r10, [sp, PT_DSP_CTRL]
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#include <linux/sched.h>
+#include <asm/asserts.h>
+#include <asm/switch_to.h>
+
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+
+/*
+ * As we save the new and restore the old AUX register value in the same
+ * place, we can optimize a bit and use the AEX instruction (swap contents
+ * of an auxiliary register with a core register) instead of an LR + SR pair.
+ */
+#define AUX_SAVE_RESTORE(_saveto, _readfrom, _offt, _aux) \
+do { \
+ long unsigned int _scratch; \
+ \
+ __asm__ __volatile__( \
+ "ld %0, [%2, %4] \n" \
+ "aex %0, [%3] \n" \
+ "st %0, [%1, %4] \n" \
+ : \
+ "=&r" (_scratch) /* must be early clobber */ \
+ : \
+ "r" (_saveto), \
+ "r" (_readfrom), \
+ "Ir" (_aux), \
+ "Ir" (_offt) \
+ : \
+ "memory" \
+ ); \
+} while (0)
+
+#define DSP_AUX_SAVE_RESTORE(_saveto, _readfrom, _aux) \
+ AUX_SAVE_RESTORE(_saveto, _readfrom, \
+ offsetof(struct dsp_callee_regs, _aux), \
+ ARC_AUX_##_aux)
+
+static inline void dsp_save_restore(struct task_struct *prev,
+ struct task_struct *next)
+{
+ long unsigned int *saveto = &prev->thread.dsp.ACC0_GLO;
+ long unsigned int *readfrom = &next->thread.dsp.ACC0_GLO;
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GLO);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GHI);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_BFLY0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_FFT_CTRL);
+
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP3);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS1);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD3);
+#endif /* CONFIG_ARC_DSP_AGU_USERSPACE */
+}
+
+#else /* !CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+#define dsp_save_restore(p, n)
+#endif /* CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+
+static inline bool dsp_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_DSP_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline bool agu_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_AGU_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline void dsp_config_check(void)
+{
+ CHK_OPT_STRICT(CONFIG_ARC_DSP_HANDLED, dsp_exist());
+ CHK_OPT_WEAK(CONFIG_ARC_DSP_AGU_USERSPACE, agu_exist());
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARC_DSP_IMPL_H */
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
new file mode 100644
index 0000000000..202c78e567
--- /dev/null
+++ b/arch/arc/include/asm/dsp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_H
+#define __ASM_ARC_DSP_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * DSP-related saved registers - need to be saved only when a task is
+ * scheduled out.
+ * Structure field names must correspond to the aux register definitions for
+ * automatic offset calculation in the DSP_AUX_SAVE_RESTORE macros
+ */
+struct dsp_callee_regs {
+ unsigned long ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_FFT_CTRL;
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ unsigned long AGU_AP0, AGU_AP1, AGU_AP2, AGU_AP3;
+ unsigned long AGU_OS0, AGU_OS1;
+ unsigned long AGU_MOD0, AGU_MOD1, AGU_MOD2, AGU_MOD3;
+#endif
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARC_DSP_H */
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
new file mode 100644
index 0000000000..a0d5ebe1bc
--- /dev/null
+++ b/arch/arc/include/asm/dwarf.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_DWARF_H
+#define _ASM_ARC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#ifdef ARC_DW2_UNWIND_AS_CFI
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_UNDEFINED .cfi_undefined
+
+#else
+
+#define CFI_IGNORE #
+
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_DEF_CFA_OFFSET CFI_IGNORE
+#define CFI_DEF_CFA_REGISTER CFI_IGNORE
+#define CFI_OFFSET CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_RESTORE CFI_IGNORE
+#define CFI_UNDEFINED CFI_IGNORE
+
+#endif /* !ARC_DW2_UNWIND_AS_CFI */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 0000000000..0284ace0e1
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <linux/elf-em.h>
+#include <uapi/asm/elf.h>
+
+#define EM_ARC_INUSE (IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
+ EM_ARCOMPACT : EM_ARCV2)
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_32_PCREL 0x31
+
+/* To set parameters in core dumps */
+#define ELF_ARCH EM_ARC_INUSE
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * -we don't load something for the wrong architecture.
+ * -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#endif
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
new file mode 100644
index 0000000000..4d13320e0c
--- /dev/null
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARC_ENTRY_ARCV2_H
+#define __ASM_ARC_ENTRY_ARCV2_H
+
+#include <asm/asm-offsets.h>
+#include <asm/dsp-impl.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/*
+ * Interrupt/Exception stack layout (pt_regs) for ARCv2
+ * (End of struct aligned to end of page [unless nested])
+ *
+ * INTERRUPT EXCEPTION
+ *
+ * manual --------------------- manual
+ * | orig_r0 |
+ * | event/ECR |
+ * | bta |
+ * | gp |
+ * | fp |
+ * | sp |
+ * | r12 |
+ * | r30 |
+ * | r58 |
+ * | r59 |
+ * hw autosave ---------------------
+ * optional | r0 |
+ * | r1 |
+ * ~ ~
+ * | r9 |
+ * | r10 |
+ * | r11 |
+ * | blink |
+ * | lpe |
+ * | lps |
+ * | lpc |
+ * | ei base |
+ * | ldi base |
+ * | jli base |
+ * ---------------------
+ * hw autosave | pc / eret |
+ * mandatory | stat32 / erstatus |
+ * ---------------------
+ */
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE
+
+ ; Before jumping to Interrupt Vector, hardware micro-ops did following:
+ ; 1. SP auto-switched to kernel mode stack
+ ; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
+ ; 3. Auto save: (mandatory) Push PC and STAT32 on stack
+ ; hardware does this even with CONFIG_ARC_IRQ_NO_AUTOSAVE
+ ; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
+ ;
+ ; Now
+ ; 4b. If Auto-save (optional) not enabled in hw, manually save them
+ ; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
+ ;
+ ; At the end, SP points to pt_regs
+
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+ ; carve pt_regs on stack (case #3), PC/STAT32 already on stack
+ sub sp, sp, SZ_PT_REGS - 8
+
+ __SAVE_REGFILE_HARD
+#else
+ ; carve pt_regs on stack (case #4), which grew partially already
+ sub sp, sp, PT_r0
+#endif
+
+ __SAVE_REGFILE_SOFT
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE_KEEP_AE
+
+ ; Before jumping to Exception Vector, hardware micro-ops did following:
+ ; 1. SP auto-switched to kernel mode stack
+ ; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
+ ;
+ ; Now manually save rest of reg file
+ ; At the end, SP points to pt_regs
+
+ sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
+
+ ; _HARD comes first: it saves r10, which _SOFT then clobbers as scratch
+
+ __SAVE_REGFILE_HARD
+ __SAVE_REGFILE_SOFT
+
+ st r0, [sp] ; orig_r0
+
+ lr r10, [eret]
+ lr r11, [erstatus]
+ ST2 r10, r11, PT_ret
+
+ lr r10, [ecr]
+ lr r11, [erbta]
+ ST2 r10, r11, PT_event
+
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
+/*------------------------------------------------------------------------
+ * This macro saves the registers manually which would normally be autosaved
+ * by hardware on taken interrupts. It is used by
+ * - exception handlers (which don't have autosave)
+ * - interrupt autosave disabled due to CONFIG_ARC_IRQ_NO_AUTOSAVE
+ */
+.macro __SAVE_REGFILE_HARD
+
+ ST2 r0, r1, PT_r0
+ ST2 r2, r3, PT_r2
+ ST2 r4, r5, PT_r4
+ ST2 r6, r7, PT_r6
+ ST2 r8, r9, PT_r8
+ ST2 r10, r11, PT_r10
+
+ st blink, [sp, PT_blink]
+
+ lr r10, [lp_end]
+ lr r11, [lp_start]
+ ST2 r10, r11, PT_lpe
+
+ st lp_count, [sp, PT_lpc]
+
+ ; skip JLI, LDI, EI for now
+.endm
+
+/*------------------------------------------------------------------------
+ * This macro saves a bunch of other registers which can't be autosaved for
+ * various reasons:
+ * - r12: the last caller-saved scratch reg; hardware autosaves in pairs,
+ * so it only covers r0-r11
+ * - r30: free reg, used by gcc as scratch
+ * - ACCL/ACCH pair when they exist
+ */
+.macro __SAVE_REGFILE_SOFT
+
+ st fp, [sp, PT_fp] ; r27
+ st r30, [sp, PT_r30]
+ st r12, [sp, PT_r12]
+ st r26, [sp, PT_r26] ; gp
+
+ ; Saving pt_regs->sp correctly requires some extra work due to the way
+ ; Auto stack switch works
+ ; - U mode: retrieve it from AUX_USER_SP
+ ; - K mode: add the offset from current SP where H/w starts auto push
+ ;
+ ; 1. Utilize the fact that Z bit is set if Intr taken in U mode
+ ; 2. Upon entry SP is always saved (for any inspection, unwinding etc),
+ ; but on return, restored only if U mode
+
+ lr r10, [AUX_USER_SP] ; U mode SP
+
+ ; ISA requires ADD.nz to have same dest and src reg operands
+ mov.nz r10, sp
+ add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
+
+ st r10, [sp, PT_sp] ; SP (pt_regs->sp)
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ ST2 r58, r59, PT_r58
+#endif
+
+ /* clobbers r10, r11 registers pair */
+ DSP_SAVE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ GET_CURR_TASK_ON_CPU gp
+#endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro __RESTORE_REGFILE_SOFT
+
+ ld fp, [sp, PT_fp]
+ ld r30, [sp, PT_r30]
+ ld r12, [sp, PT_r12]
+ ld r26, [sp, PT_r26]
+
+ ; Restore SP (into AUX_USER_SP) only if returning to U mode
+ ; - for K mode, it will be implicitly restored as stack is unwound
+ ; - Z flag set on K is inverse of what hardware does on interrupt entry
+ ; but that doesn't really matter
+ bz 1f
+
+ ld r10, [sp, PT_sp] ; SP (pt_regs->sp)
+ sr r10, [AUX_USER_SP]
+1:
+
+ /* clobbers r10, r11 registers pair */
+ DSP_RESTORE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ LD2 r58, r59, PT_r58
+#endif
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro __RESTORE_REGFILE_HARD
+
+ ld blink, [sp, PT_blink]
+
+ LD2 r10, r11, PT_lpe
+ sr r10, [lp_end]
+ sr r11, [lp_start]
+
+ ld r10, [sp, PT_lpc] ; lp_count can't be target of LD
+ mov lp_count, r10
+
+ LD2 r0, r1, PT_r0
+ LD2 r2, r3, PT_r2
+ LD2 r4, r5, PT_r4
+ LD2 r6, r7, PT_r6
+ LD2 r8, r9, PT_r8
+ LD2 r10, r11, PT_r10
+.endm
+
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE
+
+ ; INPUT: r0 has STAT32 of calling context
+ ; INPUT: Z flag set if returning to K mode
+
+ ; _SOFT clobbers r10 restored by _HARD hence the order
+
+ __RESTORE_REGFILE_SOFT
+
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+ __RESTORE_REGFILE_HARD
+
+ ; SP points to PC/STAT32: hw restores them despite NO_AUTOSAVE
+ add sp, sp, SZ_PT_REGS - 8
+#else
+ add sp, sp, PT_r0
+#endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+ ; INPUT: r0 has STAT32 of calling context
+
+ btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
+
+ ld r10, [sp, PT_bta]
+ sr r10, [erbta]
+
+ LD2 r10, r11, PT_ret
+ sr r10, [eret]
+ sr r11, [erstatus]
+
+ __RESTORE_REGFILE_SOFT
+ __RESTORE_REGFILE_HARD
+
+ add sp, sp, SZ_PT_REGS
+.endm
+
+.macro FAKE_RET_FROM_EXCPN
+ lr r9, [status32]
+ bclr r9, r9, STATUS_AE_BIT
+ bset r9, r9, STATUS_IE_BIT
+ kflag r9
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ bmskn \reg, sp, THREAD_SHIFT - 1
+.endm
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ xbfu \reg, \reg, 0xE8 /* 00111 01000 */
+ /* M = 8-1 N = 8 */
+.endm
+
+#endif
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
new file mode 100644
index 0000000000..a0e760eb35
--- /dev/null
+++ b/arch/arc/include/asm/entry-compact.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer reliably rely on the fact that
+ * if we are NOT in user mode, stack is switched to kernel mode.
+ * e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ * its prologue, including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ * Normally the CPU does this automatically, however when doing FAKE rtie,
+ * we also need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_COMPACT_H
+#define __ASM_ARC_ENTRY_COMPACT_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-compact.h>
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP set to K mode stack
+ * SP at the time of entry (K/U) saved @ pt_regs->sp
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * an L2 IRQ "Interrupts" L1
+ * That way although the L2 IRQ happened in Kernel mode, stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in the
+ * L1 ISR caused SP to go whacko (some small value which looks like a
+ * USER stk) and then we take an L2 ISR.
+ * The above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around.
+ * The only feasible way is to make sure this L2 happened in the
+ * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /*------Intr/Excp happened in kernel mode, SP already setup ------ */
+ /* save it nevertheless @ pt_regs->sp for uniformity */
+
+ b.d 66f
+ st sp, [sp, PT_sp - SZ_PT_REGS]
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+ /* With current tsk in r9, get it's kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+ /* save U mode SP @ pt_regs->sp */
+ st sp, [r9, PT_sp - SZ_PT_REGS]
+
+ /* final SP switch */
+ mov sp, r9
+66:
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN
+
+ lr r9, [status32]
+ bclr r9, r9, STATUS_AE_BIT
+ or r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
+ sr r9, [erstatus]
+ mov r9, 55f
+ sr r9, [eret]
+ rtie
+55:
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception/ISR Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro PROLOG_FREEUP_REG reg, mem
+ st \reg, [\mem]
+.endm
+
+.macro PROLOG_RESTORE_REG reg, mem
+ ld \reg, [\mem]
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE_KEEP_AE
+
+ /* Need at least 1 reg to code the early exception prologue */
+ PROLOG_FREEUP_REG r9, @ex_saved_reg1
+
+ /* U/K mode at time of exception (stack not switched if already K) */
+ lr r9, [erstatus]
+
+ /* ARC700 doesn't provide auto-stack switching */
+ SWITCH_TO_KERNEL_STK
+
+ st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
+ sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
+
+ /* Restore r9 used to code the early prologue */
+ PROLOG_RESTORE_REG r9, @ex_saved_reg1
+
+ /* now we are ready to save the regfile */
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSHAX eret
+ PUSHAX erstatus
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX erbta
+
+ lr r10, [ecr]
+ st r10, [sp, PT_event]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+ POPAX erbta
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX erstatus
+ POPAX eret
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR skipped automatically */
+.endm
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1 0x0031abcd
+#define event_IRQ2 0x0032abcd
+
+.macro INTERRUPT_PROLOGUE LVL
+
+ /* free up r9 as scratchpad */
+ PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
+
+ /* Which mode (user/kernel) was the system in when intr occurred */
+ lr r9, [status32_l\LVL\()]
+
+ SWITCH_TO_KERNEL_STK
+
+
+ PUSH 0x003\LVL\()abcd /* Dummy ECR */
+ sub sp, sp, 8 /* skip orig_r0 (not needed)
+ skip pt_regs->sp, already saved above */
+
+ /* Restore r9 used to code the early prologue */
+ PROLOG_RESTORE_REG r9, @int\LVL\()_saved_reg
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink\LVL\()
+ PUSHAX status32_l\LVL\()
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l\LVL\()
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE LVL
+
+ POPAX bta_l\LVL\()
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX status32_l\LVL\()
+ POP ilink\LVL\()
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ bic \reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#endif /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 0000000000..49c2e090cb
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#include <asm/unistd.h> /* For NR_syscalls definition */
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/mmu.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/entry-compact.h> /* ISA specific bits */
+#else
+#include <asm/entry-arcv2.h>
+#endif
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSH reg
+ st.a \reg, [sp, -4]
+.endm
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ PUSH r9
+.endm
+
+.macro POP reg
+ ld.ab \reg, [sp, 4]
+.endm
+
+.macro POPAX aux
+ POP r9
+ sr r9, [\aux]
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore Scratch Regs:
+ * used by Interrupt/Exception Prologue/Epilogue
+ *-------------------------------------------------------------*/
+.macro SAVE_R0_TO_R12
+ PUSH r0
+ PUSH r1
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ PUSH r5
+ PUSH r6
+ PUSH r7
+ PUSH r8
+ PUSH r9
+ PUSH r10
+ PUSH r11
+ PUSH r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ POP r12
+ POP r11
+ POP r10
+ POP r9
+ POP r8
+ POP r7
+ POP r6
+ POP r5
+ POP r4
+ POP r3
+ POP r2
+ POP r1
+ POP r0
+
+.endm
+
+/*--------------------------------------------------------------
+ * Helpers to save/restore callee-saved regs:
+ * used by several macros below
+ *-------------------------------------------------------------*/
+.macro SAVE_R13_TO_R25
+ PUSH r13
+ PUSH r14
+ PUSH r15
+ PUSH r16
+ PUSH r17
+ PUSH r18
+ PUSH r19
+ PUSH r20
+ PUSH r21
+ PUSH r22
+ PUSH r23
+ PUSH r24
+ PUSH r25
+.endm
+
+.macro RESTORE_R25_TO_R13
+ POP r25
+ POP r24
+ POP r23
+ POP r22
+ POP r21
+ POP r20
+ POP r19
+ POP r18
+ POP r17
+ POP r16
+ POP r15
+ POP r14
+ POP r13
+.endm
+
+/*
+ * save user mode callee regs as struct callee_regs
+ * - needed by fork/do_signal/unaligned-access-emulation.
+ */
+.macro SAVE_CALLEE_SAVED_USER
+ SAVE_R13_TO_R25
+.endm
+
+/*
+ * restore user mode callee regs as struct callee_regs
+ * - could have been changed by ptrace tracer or unaligned-access fixup
+ */
+.macro RESTORE_CALLEE_SAVED_USER
+ RESTORE_R25_TO_R13
+.endm
+
+/*
+ * save/restore kernel mode callee regs at the time of context switch
+ */
+.macro SAVE_CALLEE_SAVED_KERNEL
+ SAVE_R13_TO_R25
+.endm
+
+.macro RESTORE_CALLEE_SAVED_KERNEL
+ RESTORE_R25_TO_R13
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, SZ_CALLEE_REGS
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of it's kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hoists stack
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page where stack begins (grows upwards) */
+ add2 \out, \out, (THREAD_SIZE)/4
+
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+#ifdef CONFIG_SMP
+
+/*
+ * Retrieve the current running task on this CPU
+ * - loads it from the backing _current_task[] array (and can't use the
+ * reg caching the current task)
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov gp, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov gp, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/*
+ * Get the ptr to some field of Current Task at @off in task struct
+ * - Uses current task cached in reg if enabled
+ */
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, gp, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#else /* !__ASSEMBLY__ */
+
+extern void do_signal(struct pt_regs *);
+extern void do_notify_resume(struct pt_regs *);
+extern int do_privilege_fault(unsigned long, struct pt_regs *);
+extern int do_extension_fault(unsigned long, struct pt_regs *);
+extern int insterror_is_error(unsigned long, struct pt_regs *);
+extern int do_memory_error(unsigned long, struct pt_regs *);
+extern int trap_is_brkpt(unsigned long, struct pt_regs *);
+extern int do_misaligned_error(unsigned long, struct pt_regs *);
+extern int do_trap5_error(unsigned long, struct pt_regs *);
+extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
+extern void do_machine_check_fault(unsigned long, struct pt_regs *);
+extern void do_non_swi_trap(unsigned long, struct pt_regs *);
+extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
+extern void do_page_fault(unsigned long, struct pt_regs *);
+
+#endif
+
+#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 0000000000..6134175d96
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16 bytes */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644
index 0000000000..9c2383d29c
--- /dev/null
+++ b/arch/arc/include/asm/fb.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <asm-generic/fb.h>
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/fpu.h b/arch/arc/include/asm/fpu.h
new file mode 100644
index 0000000000..006bcf88a7
--- /dev/null
+++ b/arch/arc/include/asm/fpu.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#ifndef _ASM_ARC_FPU_H
+#define _ASM_ARC_FPU_H
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+
+#define fpu_init_task(regs)
+
+#else
+
+/*
+ * ARCv2 FPU Control aux register
+ * - bits to enable Traps on Exceptions
+ * - Rounding mode
+ *
+ * ARCv2 FPU Status aux register
+ * - FPU exceptions flags (Inv, Div-by-Zero, overflow, underflow, inexact)
+ * - Flag Write Enable to clear flags explicitly (vs. by fpu instructions
+ * only
+ */
+
+struct arc_fpu {
+ unsigned int ctrl, status;
+};
+
+extern void fpu_init_task(struct pt_regs *regs);
+
+#endif /* !CONFIG_ISA_ARCOMPACT */
+
+struct task_struct;
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+
+#else /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#define fpu_save_restore(p, n)
+#define fpu_init_task(regs)
+
+#endif /* CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#endif /* _ASM_ARC_FPU_H */
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 0000000000..607d1c16d4
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: llock %1, [%2] \n" \
+ insn "\n" \
+ "2: scond %0, [%2] \n" \
+ " bnz 1b \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory"); \
+ smp_mb() \
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory"); \
+ smp_mb() \
+
+#endif
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_disable(); /* to guarantee atomic r-m-w of futex op */
+#endif
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ /* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_enable();
+#endif
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
+ * Return 0 for success, -EFAULT otherwise
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval,
+ u32 newval)
+{
+ int ret = 0;
+ u32 existval;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_disable(); /* to guarantee atomic r-m-w of futex op */
+#endif
+ smp_mb();
+
+ __asm__ __volatile__(
+#ifdef CONFIG_ARC_HAS_LLSC
+ "1: llock %1, [%4] \n"
+ " brne %1, %2, 3f \n"
+ "2: scond %3, [%4] \n"
+ " bnz 1b \n"
+#else
+ "1: ld %1, [%4] \n"
+ " brne %1, %2, 3f \n"
+ "2: st %3, [%4] \n"
+#endif
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "+&r"(ret), "=&r"(existval)
+ : "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ smp_mb();
+
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_enable();
+#endif
+ *uval = existval;
+ return ret;
+}
+
+#endif
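
For readers unfamiliar with ARC's llock/scond, the retry loop that __futex_atomic_op() encodes can be sketched in portable C. This is a minimal sketch using GCC __atomic builtins in place of the assembly; futex_op_add_sketch is an illustrative name, not a kernel API.

/* Illustrative: what __futex_atomic_op("add %0, %1, %3", ...) boils down
 * to, with a compare-and-swap standing in for the llock/scond pair.
 */
#include <stdint.h>

static int futex_op_add_sketch(uint32_t *uaddr, uint32_t oparg,
			       uint32_t *oldval)
{
	uint32_t old, new;

	do {
		old = __atomic_load_n(uaddr, __ATOMIC_RELAXED);	/* llock */
		new = old + oparg;				/* insn  */
		/* scond fails if uaddr was written since the llock */
	} while (!__atomic_compare_exchange_n(uaddr, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	*oldval = old;		/* caller gets the pre-op value */
	return 0;
}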
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 0000000000..a6b8e2c352
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_size.h>
+
+#define FIXMAP_SIZE PGDIR_SIZE
+#define PKMAP_SIZE PGDIR_SIZE
+
+/* start after vmalloc area */
+#define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+
+#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS)
+#define FIX_KMAP_BEGIN (0UL)
+#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1)
+
+#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
+
+/*
+ * This should be converted to the asm-generic version, but of course this
+ * is needlessly different from all other architectures. Sigh - tglx
+ */
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
+
+/* start after fixmap area */
+#define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+#define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#include <asm/cacheflush.h>
+
+extern void kmap_init(void);
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+#endif
+
+#endif
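
The net effect of the #defines above is two PGDIR_SIZE-sized windows stacked directly below PAGE_OFFSET: the fixmap (kmap_local/atomic slots) and then the pkmap (kmap). A small userspace sketch of the arithmetic; the PAGE_OFFSET and PGDIR_SIZE values are assumptions, as both depend on Kconfig.

#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0x80000000UL;	/* assumed */
	unsigned long pgdir_size  = 0x00200000UL;	/* assumed 2M */
	unsigned long fixmap_size = pgdir_size, pkmap_size = pgdir_size;

	unsigned long fixmap_base = page_offset - fixmap_size - pkmap_size;
	unsigned long pkmap_base  = fixmap_base + fixmap_size;

	/* fixmap sits below pkmap, which ends exactly at PAGE_OFFSET */
	printf("FIXMAP: 0x%lx..0x%lx\n", fixmap_base, fixmap_base + fixmap_size);
	printf("PKMAP : 0x%lx..0x%lx\n", pkmap_base, pkmap_base + pkmap_size);
	return 0;
}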
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 0000000000..ef8d416637
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mkinvalid(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
+
+#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
+
+#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
+
+#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ /*
+ * open-coded pte_modify() with additional retaining of HW_SZ bit
+ * so that pmd_trans_huge() remains true for this PMD
+ */
+ return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+
+/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
+#define pmdp_establish generic_pmdp_establish
+
+#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 0000000000..4fdb735063
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/unaligned.h>
+
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *)port;
+}
+
+static inline void ioport_unmap(void __iomem *addr)
+{
+}
+
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+ void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } else { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ put_unaligned(x, buf++); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+ const void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ const u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ __raw_write##f(*buf++, addr); \
+ } while (--count); \
+ } else { \
+ do { \
+ __raw_write##f(get_unaligned(buf++), addr); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
+/*
+ * MMIO can also get buffered/optimized in the micro-arch, so barriers are
+ * needed. Based on the ARM model for the typical use case:
+ *
+ * <ST [DMA buffer]>
+ * <writel MMIO "go" reg>
+ * or:
+ * <readl MMIO "status" reg>
+ * <LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
+
+/*
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+ */
+#define readb_relaxed(c) __raw_readb(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
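
The readl()/writel() definitions above implement exactly the two orderings called out in the comment: a wmb() before the MMIO store and an rmb() after the MMIO load. A hedged driver-style fragment showing the intended use; the register offsets and names are invented for illustration.

/* Illustrative only: DOORBELL/STATUS offsets are made up. */
#define DOORBELL	0x10
#define STATUS		0x14

static void dma_kick(void __iomem *regs, u32 *buf, u32 n)
{
	buf[0] = n;			/* plain store to the DMA buffer   */
	writel(1, regs + DOORBELL);	/* __iowmb() orders ST before MMIO */
}

static u32 dma_done(void __iomem *regs, u32 *buf)
{
	u32 status = readl(regs + STATUS);	/* __iormb() after the read  */
	return status ? buf[0] : 0;		/* orders MMIO before the LD */
}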
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 0000000000..c574712ad8
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+/*
+ * ARCv2 can support 240 interrupts in the core interrupt controller and
+ * 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
+ * board configurations.
+ * This doesn't affect ARCompact, but we change it to the same value
+ */
+#define NR_IRQS 512
+
+/* Platform Independent IRQs */
+#ifdef CONFIG_ISA_ARCV2
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
+#define FIRST_EXT_IRQ 24
+#endif
+
+#include <linux/interrupt.h>
+#include <asm-generic/irq.h>
+
+extern void arc_init_IRQ(void);
+extern void arch_do_IRQ(unsigned int, struct pt_regs *);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
new file mode 100644
index 0000000000..fb3c21f1a2
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCV2_H
+#define __ASM_IRQFLAGS_ARCV2_H
+
+#include <asm/arcregs.h>
+
+/* status32 Bits */
+#define STATUS_AD_BIT 19 /* Disable Align chk: core supports non-aligned */
+#define STATUS_IE_BIT 31
+
+#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
+#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
+
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
+#define AUX_USER_SP 0x00D
+#define AUX_IRQ_CTRL 0x00E
+#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
+#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_PRIORITY 0x206
+#define ICAUSE 0x40a
+#define AUX_IRQ_SELECT 0x40b
+#define AUX_IRQ_ENABLE 0x40c
+
+/* Was Intr taken in User Mode */
+#define AUX_IRQ_ACT_BIT_U 31
+
+/*
+ * Hardware supports 16 priorities (0 highest, 15 lowest)
+ * Linux by default runs at 1, priority 0 reserved for NMI style interrupts
+ */
+#define ARCV2_IRQ_DEF_PRIO 1
+
+/* seed value for status register */
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB STATUS_AD_MASK
+#else
+#define __AD_ENB 0
+#endif
+
+#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
+ (ARCV2_IRQ_DEF_PRIO << 1))
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__(" clri %0 \n" : "=r" (flags) : : "memory");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(" seti %0 \n" : : "r" (flags) : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+static inline void arch_local_irq_enable(void)
+{
+ unsigned int irqact = read_aux_reg(AUX_IRQ_ACT);
+
+ if (irqact & 0xffff)
+ write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff);
+
+ __asm__ __volatile__(" seti \n" : : : "memory");
+}
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ __asm__ __volatile__(" clri \n" : : : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp)
+ :
+ : "memory");
+
+ /* To be compatible with irq_save()/irq_restore()
+ * encode the irq bits as expected by CLRI/SETI
+ * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+ */
+ temp = (1 << 5) |
+ ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+ ((temp >> 1) & CLRI_STATUS_E_MASK);
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & CLRI_STATUS_IE_MASK);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arc_softirq_trigger(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+.macro IRQ_DISABLE scratch
+ clri
+ TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
+ seti
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
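
In practice these primitives are reached via the generic local_irq_save()/local_irq_restore() wrappers; a minimal sketch of the resulting critical section (the counter is illustrative):

static int counter;	/* hypothetical shared state */

static void bump_counter(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* clri: disable, return old state */
	counter++;			/* non-atomic RMW, now irq-safe */
	arch_local_irq_restore(flags);	/* seti: restore whatever was saved */
}

Because flags carries the CLRI/SETI encoding rather than raw STATUS32, the restore works regardless of whether interrupts were enabled on entry.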
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
new file mode 100644
index 0000000000..0d63e568d6
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCOMPACT_H
+#define __ASM_IRQFLAGS_ARCOMPACT_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#include <asm/arcregs.h>
+
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ *
+ * Orig Bug + Rejected solution:
+ * https://lore.kernel.org/lkml/1364553218-31255-1-git-send-email-vgupta@synopsys.com
+ *
+ * Reasoning:
+ * https://lore.kernel.org/lkml/CA+55aFyFWjpSVQM6M266tKrG_ZXJzZ-nYejpmXYQXbrr42mGPQ@mail.gmail.com
+ *
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory", "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags)
+ : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " or %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc", "memory");
+}
+#endif
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp)
+ :
+ : "memory");
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+ TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 0000000000..edf201a699
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/irqflags-compact.h>
+#else
+#include <asm/irqflags-arcv2.h>
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
new file mode 100644
index 0000000000..9d96180797
--- /dev/null
+++ b/arch/arc/include/asm/jump_label.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARC_JUMP_LABEL_H
+#define _ASM_ARC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+/*
+ * NOTE about '.balign 4':
+ *
+ * To allow atomic update of a patched instruction we must guarantee
+ * that the instruction doesn't cross an L1 cache line boundary.
+ *
+ * As of today we simply align the patchable instruction to 4 bytes using
+ * the ".balign 4" directive. In that case the patched instruction is
+ * padded with one 16-bit NOP_S if required.
+ * However, the 'align by 4' directive is much stricter than actually
+ * required: it is enough that our 32-bit instruction doesn't cross an L1
+ * cache line / L1 I$ fetch block boundary, which can be achieved by using
+ * the ".bundle_align_mode" assembler directive. That would save us from
+ * adding useless NOP_S padding in most cases.
+ *
+ * TODO: switch to the ".bundle_align_mode" directive once it is supported
+ * by the ARC toolchain.
+ */
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "nop \n"
+ ".pushsection __jump_table, \"aw\" \n"
+ ".word 1b, %l[l_yes], %c0 \n"
+ ".popsection \n"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "b %l[l_yes] \n"
+ ".pushsection __jump_table, \"aw\" \n"
+ ".word 1b, %l[l_yes], %c0 \n"
+ ".popsection \n"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
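
To see what these compile into in practice: a default-false static key turns the if() below into the single NOP emitted by arch_static_branch(), and enabling the key patches that NOP into a branch. A minimal sketch; use_fast_path, fast_path() and slow_path() are illustrative names.

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(use_fast_path);

static void fast_path(void) { /* hypothetical */ }
static void slow_path(void) { /* hypothetical */ }

void do_work(void)
{
	if (static_branch_unlikely(&use_fast_path))
		fast_path();	/* reached only after the key is enabled */
	else
		slow_path();
}

void enable_fast(void)
{
	static_branch_enable(&use_fast_path);	/* patches NOP -> branch */
}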
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 0000000000..f92049d1d3
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 0000000000..f9f71b9096
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/ptrace.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 87
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs);
+
+/* This is the numbering of registers according to GDB. See GDB's
+ * arc-tdep.h for details.
+ *
+ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
+enum arc_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _FP = 27,
+ __SP = 28,
+ _R30 = 30,
+ _BLINK = 31,
+ _LP_COUNT = 60,
+ _STOP_PC = 64,
+ _RET = 64,
+ _LP_START = 65,
+ _LP_END = 66,
+ _STATUS32 = 67,
+ _ECR = 76,
+ _BTA = 82,
+};
+
+#else
+#define kgdb_trap(regs)
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 0000000000..de1566e32c
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void __kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
+#else
+#define trap_is_kprobe(address, regs)
+#endif /* CONFIG_KPROBES */
+
+#endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 0000000000..8a3fb71e9c
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#include <asm/dwarf.h>
+
+#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#ifdef __ASSEMBLY__
+
+.macro ST2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+ std \e, [sp, \off]
+#else
+ st \e, [sp, \off]
+ st \o, [sp, \off+4]
+#endif
+.endm
+
+.macro LD2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+ ldd \e, [sp, \off]
+#else
+ ld \e, [sp, \off]
+ ld \o, [sp, \off+4]
+#endif
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#define ENTRY_CFI(name) \
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name: ASM_NL \
+ CFI_STARTPROC ASM_NL
+
+#define END_CFI(name) \
+ CFI_ENDPROC ASM_NL \
+ .size name, .-name
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __section(".text.arcfp")
+#else
+#define __arcfp_code __section(".text")
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __section(".data.arcfp")
+#else
+#define __arcfp_data __section(".data")
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 0000000000..c4e1970593
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ *	Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
+ *	that a multi-platform kernel builds with an array of such descriptors.
+ *	We extend the early DT scan to also match the DT's "compatible" string
+ *	against the @dt_compat of all such descriptors, and the one with the
+ *	highest "DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+ void (*init_early)(void);
+ void (*init_per_cpu)(unsigned int);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern const struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used __section(".arch.info.init") = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
+
+extern const struct machine_desc *setup_machine_fdt(void *dt);
+
+#endif
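
A board plugs into this by instantiating a descriptor with MACHINE_START()/MACHINE_END(); the early DT scan then matches the device tree's "compatible" string against @dt_compat. A hedged example modeled on the in-tree ARC platforms; the board name, compatible string and callback are invented.

static const char *myboard_compat[] __initconst = {
	"vendor,myboard",	/* matched against the DT root "compatible" */
	NULL,
};

static void __init myboard_init_early(void)
{
	/* e.g. early clock or console fixups */
}

MACHINE_START(MYBOARD, "myboard")
	.dt_compat	= myboard_compat,
	.init_early	= myboard_init_early,
MACHINE_END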
diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h
new file mode 100644
index 0000000000..ed9036d4ed
--- /dev/null
+++ b/arch/arc/include/asm/mmu-arcv2.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012, 2019-20 Synopsys, Inc. (www.synopsys.com)
+ *
+ * MMUv3 (arc700) / MMUv4 (archs) are software page walked and software managed.
+ * This file contains the TLB access registers and commands
+ */
+
+#ifndef _ASM_ARC_MMU_ARCV2_H
+#define _ASM_ARC_MMU_ARCV2_H
+
+/*
+ * TLB Management regs
+ */
+#define ARC_REG_MMU_BCR 0x06f
+
+#ifdef CONFIG_ARC_MMU_V3
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBPD1HI 0 /* Dummy: allows common code */
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+#else
+#define ARC_REG_TLBPD0 0x460
+#define ARC_REG_TLBPD1 0x461
+#define ARC_REG_TLBPD1HI 0x463
+#define ARC_REG_TLBINDEX 0x464
+#define ARC_REG_TLBCOMMAND 0x465
+#define ARC_REG_PID 0x468
+#define ARC_REG_SCRATCH_DATA0 0x46c
+#endif
+
+/* Bits in MMU PID reg */
+#define __TLB_ENABLE (1 << 31)
+#define __PROG_ENABLE (1 << 30)
+#define MMU_ENABLE (__TLB_ENABLE | __PROG_ENABLE)
+
+/* Bits in TLB Index reg */
+#define TLB_LKUP_ERR 0x80000000
+
+#ifdef CONFIG_ARC_MMU_V3
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001)
+#else
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x40000000)
+#endif
+
+/*
+ * TLB Commands
+ */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+
+#ifdef CONFIG_ARC_MMU_V4
+#define TLBInsertEntry 0x7
+#define TLBDeleteEntry 0x8
+#endif
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+extern int pae40_exist_but_not_enab(void);
+
+static inline int is_pae40_enabled(void)
+{
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
+static inline void mmu_setup_asid(struct mm_struct *mm, unsigned long asid)
+{
+ write_aux_reg(ARC_REG_PID, asid | MMU_ENABLE);
+}
+
+static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
+{
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+#ifdef CONFIG_ISA_ARCV2
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, (unsigned int)pgd);
+#endif
+}
+
+#else
+
+.macro ARC_MMU_REENABLE reg
+ lr \reg, [ARC_REG_PID]
+ or \reg, \reg, MMU_ENABLE
+ sr \reg, [ARC_REG_PID]
+.endm
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
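
Tying the registers and commands together: installing a TLB entry amounts to loading the PD0/PD1 pair and issuing a command. A simplified, hedged sketch (the kernel's mm/tlb.c additionally handles PAE40's TLBPD1HI and duplicate-entry checks):

static void tlb_entry_insert_sketch(unsigned int pd0, unsigned int pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);	/* vaddr | ASID | ctrl bits */
	write_aux_reg(ARC_REG_TLBPD1, pd1);	/* paddr | perm bits */

#ifdef CONFIG_ARC_MMU_V4
	/* MMUv4 hardware picks a victim slot by itself */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
#else
	/* MMUv3: have hardware select a slot index, then write the entry */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
#endif
}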
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 0000000000..9febf5bc3d
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/threads.h> /* NR_CPUS */
+
+typedef struct {
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
+} mm_context_t;
+
+extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
+
+#endif
+
+#include <asm/mmu-arcv2.h>
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 0000000000..dda471f5f0
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <linux/sched/mm.h>
+
+#include <asm/tlb.h>
+#include <asm-generic/mm_hooks.h>
+
+/* ARC ASID Management
+ *
+ * The MMU tags TLB entries with an 8-bit ASID, avoiding the need to flush
+ * the TLB on context-switch.
+ *
+ * ASID is managed per cpu, so threads of a task running on different CPUs
+ * can have different ASIDs. Global ASID management would only be needed if
+ * hardware supported TLB shootdown and/or a shared TLB across cores, which
+ * ARC doesn't.
+ *
+ * Each task is assigned a unique ASID, with a simple round-robin allocator
+ * tracked in @asid_cpu. When the 8-bit value rolls over, a new cycle is
+ * started from 0, and the TLB is flushed.
+ *
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32-bit @asid_cpu (and mm->asid) keep the 8-bit MMU PID in the lower
+ * bits while the remaining 24 bits serve as a cycle/generation indicator;
+ * natural 32-bit unsigned math automagically increments the generation
+ * when the lower 8 bits roll over.
+
+#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID 0UL
+
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
+
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
+
+/*
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Move to new ASID if it was not from current alloc-cycle/generation.
+ * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly the same.
+ *
+ * Note: Callers needing new ASID unconditionally, independent of
+ * generation, e.g. local_flush_tlb_mm() for forking parent,
+ * first need to destroy the context, setting it to invalid
+ * value.
+ */
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+ goto set_hw;
+
+ /* move to new ASID and handle rollover */
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
+
+ local_flush_tlb_all();
+
+ /*
+		 * The check above is for rollover of the 8-bit ASID in its 32-bit
+		 * container. If the container itself wrapped around, set it to a
+		 * non-zero "generation" to distinguish it from no context.
+ */
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
+ }
+
+ /* Assign new ASID to tsk */
+ asid_mm(mm, cpu) = asid_cpu(cpu);
+
+set_hw:
+ mmu_setup_asid(mm, hw_pid(mm, cpu));
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+#define init_new_context init_new_context
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
+ return 0;
+}
+
+#define destroy_context destroy_context
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
+/*
+ * Prepare the MMU for the task: set up the PID reg with the allocated ASID.
+ * If the task doesn't have an ASID (never allocated, or stolen), get a new one.
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ const int cpu = smp_processor_id();
+
+ /*
+	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist CPUs for sending TLB flush IPIs; not sending
+	 * an IPI to a CPU where the task once ran could cause stale TLB entry
+	 * re-use, especially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      With a non-aggregating mm_cpumask, no IPI would be sent to C1,
+	 *      and if T1 were to re-migrate to C1, it could access the unmapped
+	 *      region via existing stale TLB entries.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ mmu_setup_pgd(next, next->pgd);
+
+ get_new_mmu_context(next);
+}
+
+/*
+ * activate_mm defaults (in asm-generic) to switch_mm and is called at the
+ * time of execve() to get a new ASID. Note the subtlety here:
+ * get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
+ * it always returns a new ASID, because mm has an unallocated "initial"
+ * value, while in the latter it moves to a new ASID only if the current
+ * one was unallocated.
+ */
+
+/* It seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for a retiring mm. However destroy_context( ) still needs to do that
+ * because between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID
+ * valid again (this teased me for a whole day).
+ */
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
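
The generation test in get_new_mmu_context() is plain unsigned arithmetic and can be exercised standalone. A userspace-compilable sketch with illustrative values:

#include <stdio.h>

#define ASID_MASK	0x000000ffu
#define CYCLE_MASK	(~ASID_MASK)

int main(void)
{
	unsigned int asid_cache = 0x100;	/* cpu counter: gen 1, asid 0 */
	unsigned int mm_asid    = 0x0ff;	/* task: gen 0, asid 0xff     */

	/* upper 24 bits differ => stale generation, allocate a fresh ASID */
	if ((mm_asid ^ asid_cache) & CYCLE_MASK)
		mm_asid = ++asid_cache;		/* 0x101: gen 1, asid 1 */

	printf("hw pid = %u, generation = %u\n",
	       mm_asid & ASID_MASK, mm_asid >> 8);
	return 0;
}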
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 0000000000..f534a1fef0
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw_info;
+ int unw_sec_idx;
+#endif
+ const char *secstr;
+};
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 0000000000..02b53ad811
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PAGE_MASK_PHYS PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
+#ifndef __ASSEMBLY__
+
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+typedef struct {
+ unsigned long pud;
+} pud_t;
+
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#endif
+
+typedef struct {
+#ifdef CONFIG_ARC_HAS_PAE40
+ unsigned long long pte;
+#else
+ unsigned long pte;
+#endif
+} pte_t;
+
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+typedef struct page *pgtable_t;
+
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds.
+ * As a rule of thumb, only use it in helpers starting with virt_.
+ * You have been warned !
+ */
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+/*
+ * When HIGHMEM is enabled we have holes in the memory map so we need
+ * pfn_valid() that takes into account the actual extents of the physical
+ * memory
+ */
+#ifdef CONFIG_HIGHMEM
+
+extern unsigned long arch_pfn_offset;
+#define ARCH_PFN_OFFSET arch_pfn_offset
+
+extern int pfn_valid(unsigned long pfn);
+#define pfn_valid pfn_valid
+
+#else /* CONFIG_HIGHMEM */
+
+#define ARCH_PFN_OFFSET virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE)
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed:
+ * "virt" here means link-address/program-address as embedded in object code,
+ * and for ARC, link-addr = physical address.
+ */
+#define __pa(vaddr) ((unsigned long)(vaddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+
+/* Default Permissions for stack/heaps pages (Non Executable) */
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
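
The virt_to_pfn() warning above is about 32-bit truncation: under PAE40 a pfn still fits in 32 bits, but shifting it back up to a physical address must be done in a 64-bit type. A userspace sketch with an illustrative 40-bit address:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13	/* assumed 8K pages */

int main(void)
{
	uint64_t paddr = 0x1080002000ULL;	/* 40-bit physical address */
	uint32_t pfn = paddr >> PAGE_SHIFT;	/* the pfn fits in 32 bits  */

	uint32_t bad  = pfn << PAGE_SHIFT;		/* 32-bit math: top byte lost */
	uint64_t good = (uint64_t)pfn << PAGE_SHIFT;	/* widen before shifting      */

	printf("truncated: 0x%x, correct: 0x%llx\n",
	       bad, (unsigned long long)good);
	return 0;
}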
diff --git a/arch/arc/include/asm/pci.h b/arch/arc/include/asm/pci.h
new file mode 100644
index 0000000000..a6858e1117
--- /dev/null
+++ b/arch/arc/include/asm/pci.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_PCI_H
+#define _ASM_ARC_PCI_H
+
+#ifdef __KERNEL__
+#include <linux/ioport.h>
+
+#define PCIBIOS_MIN_IO 0x100
+#define PCIBIOS_MIN_MEM 0x100000
+
+#define pcibios_assign_all_busses() 1
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_PCI_H */
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 0000000000..d5719a2608
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux performance counter support for ARC
+ *
+ * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+/* Max number of counters that PCT block may ever have */
+#define ARC_PERF_MAX_COUNTERS 32
+
+#define ARC_REG_CC_BUILD 0xF6
+#define ARC_REG_CC_INDEX 0x240
+#define ARC_REG_CC_NAME0 0x241
+#define ARC_REG_CC_NAME1 0x242
+
+#define ARC_REG_PCT_BUILD 0xF5
+#define ARC_REG_PCT_COUNTL 0x250
+#define ARC_REG_PCT_COUNTH 0x251
+#define ARC_REG_PCT_SNAPL 0x252
+#define ARC_REG_PCT_SNAPH 0x253
+#define ARC_REG_PCT_CONFIG 0x254
+#define ARC_REG_PCT_CONTROL 0x255
+#define ARC_REG_PCT_INDEX 0x256
+#define ARC_REG_PCT_INT_CNTL 0x25C
+#define ARC_REG_PCT_INT_CNTH 0x25D
+#define ARC_REG_PCT_INT_CTRL 0x25E
+#define ARC_REG_PCT_INT_ACT 0x25F
+
+#define ARC_REG_PCT_CONFIG_USER (1 << 18) /* count in user mode */
+#define ARC_REG_PCT_CONFIG_KERN (1 << 19) /* count in kernel mode */
+
+#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
+#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
+#else
+ unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int c:16, r:8, v:8;
+#else
+ unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
+
+#ifdef CONFIG_PERF_EVENTS
+#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+#endif
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 0000000000..096b8ef58e
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ * -Page Table size capped to max 1 to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ * =Needed so that Page Table allocator (pte_alloc_one) is not forced to
+ * deal with struct page. That way in future we can make it allocate
+ * multiple PG Tbls in one Page Frame
+ * =sweet side effect is avoiding calls to ugly page_address( ) from the
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+#include <asm-generic/pgalloc.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ /*
+	 * The cast to long below is OK in the 32-bit PAE40 regime with long long pte.
+	 * Despite the "wider" pte, the pte table needs to be in non-PAE low memory
+ * as all higher levels can only hold long pointers.
+ *
+ * The cast itself is needed given simplistic definition of set_pmd()
+ */
+ set_pmd(pmd, __pmd((unsigned long)pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
+{
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+
+ if (ret) {
+ int num, num2;
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
+{
+ set_p4d(p4dp, __p4d((unsigned long)pudp));
+}
+
+#define __pud_free_tlb(tlb, pmd, addr) pud_free((tlb)->mm, pmd)
+
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
+{
+ set_pud(pudp, __pud((unsigned long)pmdp));
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
new file mode 100644
index 0000000000..f3eea3f30b
--- /dev/null
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS)
+ * These correspond to the bits in the TLB
+ */
+
+#ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
+#define _ASM_ARC_PGTABLE_BITS_ARCV2_H
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_CACHEABLE (1 << 0) /* Cached (H) */
+#else
+#define _PAGE_CACHEABLE 0
+#endif
+
+#define _PAGE_EXECUTE (1 << 1) /* User Execute (H) */
+#define _PAGE_WRITE (1 << 2) /* User Write (H) */
+#define _PAGE_READ (1 << 3) /* User Read (H) */
+#define _PAGE_ACCESSED (1 << 4) /* Accessed (s) */
+#define _PAGE_DIRTY (1 << 5) /* Modified (s) */
+#define _PAGE_SPECIAL (1 << 6)
+#define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */
+#define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */
+
+/* We borrow bit 5 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
+
+#ifdef CONFIG_ARC_MMU_V4
+#define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */
+#else
+#define _PAGE_HW_SZ 0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF \
+ | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL \
+ | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+/*
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *    can be tracked independently of X/W, unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#ifndef __ASSEMBLY__
+
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite_novma, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr);
+
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <-------------- offset -------------> <--- zero --> E < type ->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
+ */
+#define __swp_entry(type, off) ((swp_entry_t) \
+ { ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
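+/*
+ * Illustrative round-trip of the encoding above (values made up):
+ *
+ *	swp_entry_t e = __swp_entry(2, 0x1234);	// val = (0x1234 << 13) | 2
+ *	__swp_type(e);				// == 2
+ *	__swp_offset(e);			// == 0x1234
+ *
+ * and __swp_entry_to_pte(e) is !pte_present(), since _PAGE_PRESENT lies
+ * in the zero'ed field.
+ */
+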
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+PTE_BIT_FUNC(swp_mkexclusive, |= (_PAGE_SWP_EXCLUSIVE));
+PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
new file mode 100644
index 0000000000..fc417c75c2
--- /dev/null
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Helpers for implementing paging levels
+ */
+
+#ifndef _ASM_ARC_PGTABLE_LEVELS_H
+#define _ASM_ARC_PGTABLE_LEVELS_H
+
+#if CONFIG_PGTABLE_LEVELS == 2
+
+/*
+ * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS)
+ *
+ * [31] 32 bit virtual address [0]
+ * -------------------------------------------------------
+ * | | <---------- PGDIR_SHIFT ----------> |
+ * | | | <-- PAGE_SHIFT --> |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | ---> index into Page Table
+ * ----> index into Page Directory
+ *
+ * Given the software walk, the vaddr split is arbitrarily set to 11:8:13.
+ * However, enabling super pages in a 2 level regime pegs PGDIR_SHIFT to the
+ * super page size.
+ */
+
+#if defined(CONFIG_ARC_HUGEPAGE_16M)
+#define PGDIR_SHIFT 24
+#elif defined(CONFIG_ARC_HUGEPAGE_2M)
+#define PGDIR_SHIFT 21
+#else
+/*
+ * No super page case
+ * Default value provides 11:8:13 (8K), 10:10:12 (4K)
+ * Limit imposed by pgtable_t being only PAGE_SIZE long
+ * (so a 4K page can only hold 1K entries, i.e. 10 bits)
+ */
+#ifdef CONFIG_ARC_PAGE_SIZE_4K
+#define PGDIR_SHIFT 22
+#else
+#define PGDIR_SHIFT 21
+#endif
+
+#endif
+
+#else /* CONFIG_PGTABLE_LEVELS != 2 */
+
+/*
+ * A default 3 level paging test setup in a software walked MMU
+ *   MMUv4 (8K page): <4> : <7> : <8> : <13>
+ * A default 4 level paging test setup in a software walked MMU
+ *   MMUv4 (8K page): <4> : <3> : <4> : <8> : <13>
+ */
+#define PGDIR_SHIFT 28
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PUD_SHIFT 25
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PMD_SHIFT 21
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS */
+
+#define PGDIR_SIZE BIT(PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define PTRS_PER_PGD BIT(32 - PGDIR_SHIFT)
+
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PUD_SIZE BIT(PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PTRS_PER_PUD BIT(PGDIR_SHIFT - PUD_SHIFT)
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PMD_SIZE BIT(PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PTRS_PER_PMD BIT(PUD_SHIFT - PMD_SHIFT)
+#endif
+
+#define PTRS_PER_PTE BIT(PMD_SHIFT - PAGE_SHIFT)
+
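+/*
+ * Worked example for the default 2 level, 8K page setup (PGDIR_SHIFT = 21):
+ *	PGDIR_SIZE   = 2M
+ *	PTRS_PER_PGD = BIT(32 - 21) = 2048	(the "11" in 11:8:13)
+ *	PTRS_PER_PTE = BIT(21 - 13) = 256	(the "8")
+ */
+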
+#ifndef __ASSEMBLY__
+
+#if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/pgtable-nop4d.h>
+#elif CONFIG_PGTABLE_LEVELS > 2
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nopmd.h>
+#endif
+
+/*
+ * 1st level paging: pgd
+ */
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+/* In 4 level paging, p4d_* macros work on pgd */
+#define p4d_none(x) (!p4d_val(x))
+#define p4d_bad(x) ((p4d_val(x) & ~PAGE_MASK))
+#define p4d_present(x) (p4d_val(x))
+#define p4d_clear(xp) do { p4d_val(*(xp)) = 0; } while (0)
+#define p4d_pgtable(p4d) ((pud_t *)(p4d_val(p4d) & PAGE_MASK))
+#define p4d_page(p4d) virt_to_page(p4d_pgtable(p4d))
+#define set_p4d(p4dp, p4d) (*(p4dp) = p4d)
+
+/*
+ * 2nd level paging: pud
+ */
+#define pud_ERROR(e) \
+ pr_crit("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+/*
+ * In 3 level paging, pud_* macros work on pgd
+ * In 4 level paging, pud_* macros work on pud
+ */
+#define pud_none(x) (!pud_val(x))
+#define pud_bad(x) ((pud_val(x) & ~PAGE_MASK))
+#define pud_present(x) (pud_val(x))
+#define pud_clear(xp) do { pud_val(*(xp)) = 0; } while (0)
+#define pud_pgtable(pud) ((pmd_t *)(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) virt_to_page(pud_pgtable(pud))
+#define set_pud(pudp, pud) (*(pudp) = pud)
+
+/*
+ * 3rd level paging: pmd
+ */
+#define pmd_ERROR(e) \
+ pr_crit("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+#endif
+
+/*
+ * Due to the strange way generic pgtable level folding works, the pmd_* macros
+ * - are valid even for 2 levels (which supposedly only has pgd - pte)
+ * - behave differently for 2 vs. 3
+ * In 2 level paging (pgd -> pte), pmd_* macros work on pgd
+ * In 3+ level paging (pgd -> pmd -> pte), pmd_* macros work on pmd
+ */
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_pfn(pmd) ((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
+#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define set_pmd(pmdp, pmd) (*(pmdp) = pmd)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page(pmd))
+
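+/*
+ * Putting the accessors together: an illustrative top-down software walk
+ * (sketch only, not the actual TLB miss handler):
+ *
+ *	pgd_t *pgd = pgd_offset(mm, addr);
+ *	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd, addr), addr), addr);
+ *	pte_t *pte = pte_offset_map(pmd, addr);
+ *
+ * with the unconfigured middle levels folding away to no-ops.
+ */
+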
+/*
+ * 4th level paging: pte
+ */
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+
+#define PFN_PTE_SHIFT PAGE_SHIFT
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,ptep) set_pte_at(mm, addr, ptep, __pte(0))
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+#define set_pte(ptep, pte) ((*(ptep)) = (pte))
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+#ifdef CONFIG_ISA_ARCV2
+#define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 0000000000..4cf45a99fd
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <linux/bits.h>
+
+#include <asm/pgtable-levels.h>
+#include <asm/pgtable-bits-arcv2.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+
+/*
+ * Number of PGD entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
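+/* e.g. 8K pages (PGDIR_SHIFT = 21): 0x6000_0000 / 2M = 768 user PGD slots */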
+
+#ifndef __ASSEMBLY__
+
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 0000000000..d606658e2f
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not important enough to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long callee_reg; /* pointer to callee regs */
+ unsigned long fault_address; /* dbls as brkpt holder as well */
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ struct dsp_callee_regs dsp;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { }
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
+
+/*
+ * A lot of busy-wait loops in SMP are based on non-volatile data which would
+ * otherwise get optimised away by gcc; the barrier prevents that
+ */
+#define cpu_relax() barrier()
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+/*
+ * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
+ * Look in process.c for details of kernel stack layout
+ */
+#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
+
+#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
+ sizeof(struct callee_regs) + off)))
+
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp);
+
+extern unsigned int __get_wchan(struct task_struct *p);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Default System Memory Map on ARC
+ *
+ * ---------------------------- (lower 2G, Translated) -------------------------
+ * 0x0000_0000 0x5FFF_FFFF (user vaddr: TASK_SIZE)
+ * 0x6000_0000 0x6FFF_FFFF (reserved gutter between U/K)
+ * 0x7000_0000 0x7FFF_FFFF (kvaddr: vmalloc/modules/pkmap..)
+ *
+ * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) -----------------------
+ * 0x8000_0000 0xBFFF_FFFF (kernel direct mapped)
+ * 0xC000_0000 0xFFFF_FFFF (peripheral uncached space)
+ * -----------------------------------------------------------------------------
+ */
+
+#define TASK_SIZE 0x60000000
+
+#define VMALLOC_START (PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
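+/* e.g. the default CONFIG_ARC_KVADDR_SIZE of 256 puts this at 0x7000_0000 */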
+
+/* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */
+#define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PMD_SIZE * 4)
+
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+#define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
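+/* e.g. with TASK_SIZE 0x6000_0000 above, the mmap search starts at 0x2000_0000 */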
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 0000000000..4a2b30fb5a
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+typedef union {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned long state:8, vec:8, cause:8, param:8;
+#else
+ unsigned long param:8, cause:8, vec:8, state:8;
+#endif
+ };
+ unsigned long full;
+} ecr_reg;
+
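+/*
+ * e.g. decoding an ECR of 0x00250100 (little-endian config, value made up):
+ *	ecr.full = 0x00250100 => vec = 0x25, cause = 0x01, param = 0x00
+ */
+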
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+#ifdef CONFIG_ISA_ARCOMPACT
+struct pt_regs {
+
+ /* Real registers */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
+
+ unsigned long lp_start, lp_end, lp_count;
+
+ unsigned long status32; /* status32_l1, status32_l2, erstatus */
+ unsigned long ret; /* ilink1, ilink2 or eret */
+ unsigned long blink;
+ unsigned long fp;
+ unsigned long r26; /* gp */
+
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
+ unsigned long sp; /* User/Kernel depending on where we came from */
+ unsigned long orig_r0;
+
+ /*
+ * To distinguish between exception, syscall and irq
+ * For traps and exceptions, Exception Cause Register.
+ * ECR: <00> <VV> <CC> <PP>
+ * Last word used by Linux for extra state mgmt (syscall-restart)
+ * For interrupts, use artificial ECR values to note current prio-level
+ */
+ ecr_reg ecr;
+};
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
+
+#else
+
+struct pt_regs {
+
+ unsigned long orig_r0;
+
+ ecr_reg ecr; /* Exception Cause Reg */
+
+ unsigned long bta; /* erbta */
+
+ unsigned long fp;
+ unsigned long r30;
+ unsigned long r12;
+ unsigned long r26; /* gp */
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
+#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ unsigned long DSP_CTRL;
+#endif
+
+ unsigned long sp; /* user/kernel sp depending on entry */
+
+ /*------- Below list auto saved by h/w -----------*/
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+
+ unsigned long blink;
+ unsigned long lp_end, lp_start, lp_count;
+
+ unsigned long ei, ldi, jli;
+
+ unsigned long ret;
+ unsigned long status32;
+};
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
+
+#endif
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
+#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
+
+#define STATE_SCALL_RESTARTED 0x01
+
+#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return (long)regs->r0;
+}
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
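+/*
+ * e.g. a kprobes style lookup-by-name (sketch; "r0" assumes the name used
+ * in the regoffset table in ptrace.c):
+ *
+ *	int off = regs_query_register_offset("r0");
+ *	unsigned long r0 = regs_get_register(regs, off);
+ */
+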
+extern int syscall_trace_entry(struct pt_regs *);
+extern void syscall_trace_exit(struct pt_regs *);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 0000000000..860b4fd67a
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __arc_dccm_base[];
+
+#endif
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 0000000000..83062c8b97
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header.
+ * However to still determine it dynamically (for multi-platform images)
+ * we do this in a helper by parsing the FDT early
+ */
+
+extern unsigned int __init arc_early_base_baud(void);
+
+#define BASE_BAUD arc_early_base_baud()
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 0000000000..1c6db599e1
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+#ifndef __ASM_ARC_SETUP_H
+#define __ASM_ARC_SETUP_H
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+extern int root_mountflags, end_mem;
+
+void setup_processor(void);
+void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
+
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s) ((v) ? s : "")
+#define IS_DISABLED_RUN(v) ((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
+
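+/*
+ * Usage sketch (field name illustrative): IS_AVAIL2() expands to *two*
+ * comma-separated args, so the format string needs two %s:
+ *
+ *	pr_info("LLSC: %s%s", IS_AVAIL2(isa.llsc, "yes ", CONFIG_ARC_HAS_LLSC));
+ */
+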
+extern void arc_mmu_init(void);
+extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void arc_cache_init(void);
+extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void __init handle_uboot_args(void);
+
+#endif /* __ASM_ARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 0000000000..8b0251464f
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define SHMLBA (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 0000000000..e0913f52c2
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps, hence this forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_wait_to_boot(int);
+extern void start_kernel_secondary(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp: An SMP specific h/w block can init itself
+ * Could be common across platforms so not covered by
+ * mach_desc->init_early()
+ * @init_per_cpu: Called for each core so SMP h/w block driver can do
+ * any needed setup per cpu (e.g. IPI request)
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpu
+ * @ipi_clear: To clear IPI received at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*init_early_smp)(void);
+ void (*init_per_cpu)(int cpu);
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(int cpu);
+ void (*ipi_clear)(int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#else /* CONFIG_SMP */
+
+static inline void smp_init_cpus(void) {}
+static inline const char *arc_platform_smp_cpuinfo(void)
+{
+ return "";
+}
+
+#endif /* !CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops based
+ * on retry-if-irq-in-atomic (with hardware assist).
+ * However despite these, we provide the IRQ disabling variant
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ *     still needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which the core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ * However the exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ *	asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ *	-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be a NOP for !CONFIG_SMP,
+ * but the same is not true for the ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
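+
+/*
+ * Typical use in the !LLSC atomics backend (illustrative sketch):
+ *
+ *	unsigned long flags;
+ *
+ *	atomic_ops_lock(flags);
+ *	v->counter += i;
+ *	atomic_ops_unlock(flags);
+ */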
+
+#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 0000000000..1928716089
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,382 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ /*
+ * ACQUIRE barrier to ensure load/store after taking the lock
+ * don't "bleed-up" out of the critical section (leak-in is allowed)
+ * http://www.spinics.net/lists/kernel/msg2010409.html
+ *
+ * ARCv2 only has load-load, store-store and all-all barrier
+ * thus need the full all-all barrier
+ */
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " mov %[got_it], 1 \n"
+ "4: \n"
+ " \n"
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
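+/*
+ * Usage sketch: what the generic locking layer ultimately boils
+ * raw_spin_lock()/unlock() down to on ARC (illustrative only):
+ *
+ *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ *
+ *	arch_spin_lock(&lock);
+ *	... critical section ...
+ *	arch_spin_unlock(&lock);
+ */
+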
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+ /*
+ * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
+ * for ACQ and REL semantics respectively. However EX based spinlocks
+ * need the extra smp_mb to workaround a hardware quirk.
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (val)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (val)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ smp_mb();
+
+ return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ /*
+ * RELEASE barrier: given the instructions avail on ARCv2, full barrier
+ * is the only option
+ */
+ smp_mb();
+
+ /*
+ * EX is not really required here, a simple STore of 0 suffices.
+ * However this causes tasklist livelocks in SystemC based SMP virtual
+ * platforms where the systemc core scheduler uses EX as a cue for
+ * moving to next core. Do a git log of this file for details
+ */
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (val)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ /*
+ * see pairing version/comment in arch_spin_lock above
+ */
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ */
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
+}
+
+#endif
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 0000000000..7cd0373998
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
+typedef struct {
+ volatile unsigned int counter;
+#ifndef CONFIG_ARC_HAS_LLSC
+ arch_spinlock_t lock_mutex;
+#endif
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 0000000000..4c50fb003d
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk: NULL for current task, specific task otherwise
+ * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn: Callback invoked for each frame unwound
+ * Returns 0 to continue unwinding, -1 to stop
+ * @arg: Arg to callback
+ *
+ * Returns the address of the first function on the stack
+ *
+ * Semantics:
+ * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
+ * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+ struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *),
+ void *arg);
+
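+/*
+ * e.g. a minimal dump_stack() style consumer (sketch; print_addr is a
+ * hypothetical callback, not part of this API):
+ *
+ *	static int print_addr(unsigned int address, void *arg)
+ *	{
+ *		pr_cont(" %pS", (void *)address);
+ *		return 0;
+ *	}
+ *
+ *	arc_unwind_core(NULL, NULL, print_addr, NULL);
+ */
+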
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 0000000000..3182ea9dcd
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 0000000000..1f85de8288
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/dsp-impl.h>
+#include <asm/fpu.h>
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ dsp_save_restore(prev, next); \
+ fpu_save_restore(prev, next); \
+ last = __switch_to(prev, next);\
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 0000000000..9709256e31
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <uapi/linux/audit.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+extern void *sys_call_table[];
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->r0 = regs->orig_r0;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * Fetch all 6 syscall arguments (passed in r0..r5) into @args
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ unsigned int n = 6;
+ unsigned int i = 0;
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
+
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCOMPACT)
+ ? (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCOMPACTBE : AUDIT_ARCH_ARCOMPACT)
+ : (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCV2BE : AUDIT_ARCH_ARCV2);
+}
+
+#endif
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 0000000000..c3f4714a4f
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_clone3_wrapper(void *, size_t);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
+
+#include <asm-generic/syscalls.h>
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 0000000000..4c530cf131
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: Oct 2009
+ * No need for an ARC specific thread_info allocator (kmalloc/free). This is
+ * anyway a one-page allocation, so slab alloc can be short-circuited and the
+ * generic version (get_free_page) is loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT (PAGE_SHIFT << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ unsigned long ksp; /* kernel mode stack top in __switch_to */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+ struct task_struct *task; /* main task structure */
+};
+
+/*
+ * initialize thread_info for any @tsk
+ * - this is not related to init_task per se
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
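+/*
+ * e.g. with an 8K THREAD_SIZE, sp = 0x9f81_3e60 (made-up value) masks down
+ * to 0x9f81_2000: the base of the stack pages, where thread_info lives.
+ */
+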
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT 17 /* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 0000000000..48b3482bc9
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 0000000000..975b35d373
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 0000000000..992a2837a5
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
+
+#ifndef CONFIG_SMP
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
+#endif
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#endif
+#endif /* CONFIG_SMP */
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 0000000000..1e8809ea00
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: June 2010
+ * -__clear_user(), called multiple times during ELF load, was a byte loop;
+ *  converted to do as much word clearing as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/string.h> /* for generic string functions */
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
+ " mov %R1, 0\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
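+/*
+ * These back the generic get_user()/put_user() (uaddr below is illustrative):
+ *
+ *	u32 word;
+ *	if (get_user(word, (u32 __user *)uaddr))
+ *		return -EFAULT;
+ */
+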
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+ * Note the '&' earlyclobber on the operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code sz of the
+ * laddered copy 16x,8,4,2,1
+ */
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /* Note the '&' earlyclobber on the operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "memory");
+
+ return res;
+}
+
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
+#define __clear_user __clear_user
+
+#include <asm-generic/uaccess.h>
+
+#endif
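
Worth noting for readers of the two copy routines above: they return the number of
bytes that could NOT be copied, so 0 signals complete success. Below is a minimal,
hedged sketch of a hypothetical caller (function and message are made up;
copy_to_user() is the asm-generic wrapper enabled by INLINE_COPY_TO_USER above):

	/* Hypothetical read handler: 0 from copy_to_user() == full copy */
	static ssize_t demo_read(struct file *f, char __user *buf,
				 size_t len, loff_t *ppos)
	{
		static const char msg[] = "hello";
		size_t n = min(len, sizeof(msg));

		if (copy_to_user(buf, msg, n))
			return -EFAULT;	/* some bytes faulted in userspace */
		return n;
	}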
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 0000000000..cf5a02382e
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_EMUL_UNALIGNED
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* Not fixed */
+ return 1;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
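
The return convention matters to the trap path: misaligned_fixup() yields 0 when
the access was emulated and non-zero when it was not fixed. A hedged sketch of the
caller side follows (modelled on a typical trap-handler flow; the in-tree handler
in traps.c differs in detail):

	void demo_misaligned_trap(unsigned long address, struct pt_regs *regs,
				  struct callee_regs *cregs)
	{
		if (misaligned_fixup(address, regs, cregs) != 0) {
			/* not fixed: signal the offending task */
			force_sig_fault(SIGBUS, BUS_ADRALN,
					(void __user *)address);
		}
		/* fixed: regs were already advanced past the emulated access */
	}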
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 0000000000..e95a20453a
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME: frame-pointer based unwind support is incomplete */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
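
The accessor macros above are enough to drive a frame walk. A hedged sketch of the
intended calling convention (the in-tree consumer is the stacktrace code; this
assumes arc_unwind() returns non-zero once it can no longer unwind):

	static void demo_backtrace(struct unwind_frame_info *frame)
	{
		while (UNW_PC(frame)) {
			pr_info("  pc: %pS\n", (void *)UNW_PC(frame));
			if (arc_unwind(frame))	/* non-zero: walk exhausted */
				break;
		}
	}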
diff --git a/arch/arc/include/asm/vermagic.h b/arch/arc/include/asm/vermagic.h
new file mode 100644
index 0000000000..a10257d2c6
--- /dev/null
+++ b/arch/arc/include/asm/vermagic.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "ARC700"
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/arc/include/asm/vmalloc.h b/arch/arc/include/asm/vmalloc.h
new file mode 100644
index 0000000000..973095aad6
--- /dev/null
+++ b/arch/arc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARC_VMALLOC_H
+#define _ASM_ARC_VMALLOC_H
+
+#endif /* _ASM_ARC_VMALLOC_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 0000000000..e784701419
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += ucontext.h
diff --git a/arch/arc/include/uapi/asm/bpf_perf_event.h b/arch/arc/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 0000000000..6cb1c28232
--- /dev/null
+++ b/arch/arc/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_regs_struct bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000000..5540111531
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef __BIG_ENDIAN__
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 0000000000..0e4f2affc7
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's fine-grained cacheflush requirements
+ */
+#define CF_I_INV 0x0002
+#define CF_D_FLUSH 0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
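
Userspace is expected to hand these flags to the ARC-private cacheflush(2)
syscall (declared further below in uapi/asm/unistd.h). A hedged userland sketch,
assuming the (start, len, flags) argument order:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* Flush both I and D cache after writing JIT'ed code into buf */
	static int demo_flush_jit(void *buf, unsigned long len)
	{
		return syscall(__NR_cacheflush, buf, len, BCACHE);
	}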
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 0000000000..3c1dae4e5a
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h> /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK 0x00000f00
+
+#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
+#endif
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_ARCV2REG (sizeof(struct user_regs_arcv2) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
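
A hedged sketch of the gate a loader could apply with these flags (the in-tree
elf_check_arch() is more involved; this shows only the mask/compare):

	static int demo_check_osabi(unsigned int e_flags)
	{
		return (e_flags & EF_ARC_OSABI_MSK) == EF_ARC_OSABI_CURRENT;
	}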
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 0000000000..2a4ad619ab
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad hoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
+ * using the correct uClibc header, and our autoconf.h is not available
+ * in their build
+ */
+#define PAGE_SHIFT 13
+#endif
+
+#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET	_AC(0x80000000, UL)	/* Kernel starts at 2G onwards */
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
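
With the default PAGE_SHIFT of 13, the constants above work out to PAGE_SIZE =
0x2000 and PAGE_MASK = 0xffffe000. A small userland-style sketch of the
arithmetic (CONFIG selection replaced by the 8K default):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 1UL << 13;		/* 0x2000 */
		unsigned long page_mask = ~(page_size - 1);	/* 0xffffe000 */

		/* 0x80001234 rounds down to its page base */
		printf("%#lx\n", 0x80001234UL & page_mask);	/* 0x80000000 */
		return 0;
	}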
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000000..2a6eff57f6
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+#define PTRACE_GET_THREAD_AREA 25
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ * -ptrace (gdbserver)
+ * -sigcontext (SA_SIGINFO signal frame)
+ *
+ * This is to decouple pt_regs from user-space ABI, to be able to change it
+ * w/o affecting the ABI.
+ *
+ * The intermediate pad,pad2 are relics of initial layout based on pt_regs
+ * for optimizations when copying pt_regs to/from user_regs_struct.
+ * We no longer need them, but they can't be changed as they are part of the ABI now.
+ *
+ * Also, sigcontext only cares about the scratch regs as that is what we really
+ * save/restore for signal handling. However gdb also uses the same struct,
+ * hence callee regs need to be in there too.
+*/
+struct user_regs_struct {
+
+ unsigned long pad;
+ struct {
+ unsigned long bta, lp_start, lp_end, lp_count;
+ unsigned long status32, ret, blink, fp, gp;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long sp;
+ } scratch;
+ unsigned long pad2;
+ struct {
+ unsigned long r25, r24, r23, r22, r21, r20;
+ unsigned long r19, r18, r17, r16, r15, r14, r13;
+ } callee;
+ unsigned long efa; /* break pt addr, for break points in delay slots */
+ unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
+};
+
+struct user_regs_arcv2 {
+ unsigned long r30, r58, r59;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
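
PTRACE_GET_THREAD_AREA (25) lets a debugger read the tracee's TLS pointer. A
hedged debugger-side sketch, assuming the usual arch_ptrace() convention of
storing the result through the data pointer:

	#include <sys/types.h>
	#include <sys/ptrace.h>

	static long demo_get_tls(pid_t pid, unsigned long *tls)
	{
		return ptrace(PTRACE_GET_THREAD_AREA, pid, NULL, tls);
	}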
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 0000000000..a6d4e44938
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of userspace header ABI so UAPI scripts have to generate it
+ * even if there's nothing to export - causing an empty <uapi/asm/setup.h>.
+ * However, to prevent "patch" from discarding it, we add this placeholder
+ * comment
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000000..7a5449dfcb
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+ struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 0000000000..ba3143a1b3
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for ARC sigreturn optimization.
+ * This allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
+ * which allows sigreturn-based re-entry into the kernel after handling a signal.
+ * W/o this, the kernel needs to "synthesize" the sigreturn trampoline on the
+ * user mode stack, which in turn forces the following:
+ * -TLB Flush (after making the stack page executable)
+ * -Cache line Flush (to make I/D Cache lines coherent)
+ */
+#define SA_RESTORER 0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
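
A hedged userland sketch of the contract described above: libc registers its own
sigreturn stub so the kernel need not synthesize a trampoline on the user stack.
This is normally libc-internal territory; stub and handler here are hypothetical:

	#include <signal.h>

	extern void demo_sigreturn_stub(void);	/* hypothetical libc stub */
	extern void demo_handler(int sig);	/* hypothetical handler */

	static void demo_install(void)
	{
		struct sigaction sa = {
			.sa_handler  = demo_handler,
			.sa_flags    = SA_RESTORER,
			.sa_restorer = demo_sigreturn_stub,
		};

		sigaction(SIGUSR1, &sa, NULL);
	}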
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 0000000000..02109cd48e
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ * -Better htonl implementation (5 instead of 9 ALU instructions)
+ * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " swape %0, %1 \n" \
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ * return x<<24 | x>>24 |
+ * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
+ * 8051fd98: asl r5,r3,24 ; get 3rd Byte
+ * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
+ * 8051fda0: and r4,r3,0xff00
+ * 8051fda8: asl r4,r4,8 ; get 1st Byte
+ * 8051fdac: and r3,r3,0x00ff0000
+ * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
+ * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of ARC ISA (rotate intrns)
+ */
+
+#define __arch_swab32(x) \
+({ unsigned long __in = (x), __tmp; \
+ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
+ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
+ __tmp ^= __in; \
+ __tmp &= 0xff00ff; \
+ __tmp ^ __in; \
+})
+
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+ " bswap %0, %1 \n"\
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
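
The rotate-based algorithm reads nicely when traced on a concrete value. A
standalone, hedged C rendering of the macro above (same five operations; the
comments show the bytes for input 0x11223344):

	#include <stdint.h>

	static uint32_t demo_swab32(uint32_t in)
	{
		uint32_t tmp;

		tmp  = in << 8 | in >> 24;	/* ror in,24: 0x22334411 */
		in   = in << 24 | in >> 8;	/* ror in,8 : 0x44112233 */
		tmp ^= in;			/* 0x66226622 */
		tmp &= 0x00ff00ff;		/* 0x00220022 */
		return tmp ^ in;		/* 0x44332211 */
	}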
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 0000000000..fa2713ae6b
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
+__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
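
A hedged userland sketch of reaching the ARC-private calls above through
syscall(2) (assumes an ARC toolchain whose <asm/unistd.h> exposes these numbers):

	#include <sys/syscall.h>
	#include <unistd.h>

	static void demo_settls(void *tls)
	{
		syscall(__NR_arc_settls, tls);
	}

	static void *demo_gettls(void)
	{
		return (void *)syscall(__NR_arc_gettls);
	}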
diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore
new file mode 100644
index 0000000000..bbb90f92d0
--- /dev/null
+++ b/arch/arc/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 0000000000..95fbf9364c
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-y := head.o arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
+obj-y += ctx_sw_asm.o
+
+obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
+obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o
+
+obj-$(CONFIG_MODULES) += arcksyms.o module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_ARC_MCIP) += mcip.o
+obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+ifdef CONFIG_ISA_ARCOMPACT
+CFLAGS_fpu.o += -mdpfp
+endif
+
+extra-y := vmlinux.lds
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 0000000000..08c5196efe
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/fs.h> /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h> /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ pr_warn("Hostlink buffer mmap ERROR\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+	/* we only support returning the physical addr to mmap in user space */
+ put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+ return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+ .unlocked_ioctl = arc_hl_ioctl,
+ .mmap = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hostlink",
+ .fops = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+ pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+ return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
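
A hedged userland sketch of the intended usage: the driver ignores the ioctl cmd
and simply hands back the buffer's address, which userspace then feeds to mmap()
as the offset (device path assumed, error handling abbreviated):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void *demo_map_hostlink(size_t len)
	{
		unsigned int phys;
		int fd = open("/dev/hostlink", O_RDWR);

		if (fd < 0)
			return NULL;
		ioctl(fd, 0, &phys);	/* any cmd: returns buffer address */
		return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, phys);
	}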
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 0000000000..8851c0a19e
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+extern void __udivdi3(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 0000000000..f77deb7991
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <linux/ptrace.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+
+
+int main(void)
+{
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ BLANK();
+
+ DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+ DEFINE(THREAD_FAULT_ADDR,
+ offsetof(struct thread_struct, fault_address));
+
+ BLANK();
+
+ DEFINE(THREAD_INFO_KSP, offsetof(struct thread_info, ksp));
+ DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT,
+ offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+ DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+ DEFINE(TASK_COMM, offsetof(struct task_struct, comm));
+
+ DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+ DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+ BLANK();
+
+ DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+ DEFINE(PT_event, offsetof(struct pt_regs, ecr));
+ DEFINE(PT_bta, offsetof(struct pt_regs, bta));
+ DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+ DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+ DEFINE(PT_r8, offsetof(struct pt_regs, r8));
+ DEFINE(PT_r10, offsetof(struct pt_regs, r10));
+ DEFINE(PT_r26, offsetof(struct pt_regs, r26));
+ DEFINE(PT_ret, offsetof(struct pt_regs, ret));
+ DEFINE(PT_blink, offsetof(struct pt_regs, blink));
+ OFFSET(PT_fp, pt_regs, fp);
+ DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
+ DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
+#ifdef CONFIG_ISA_ARCV2
+ OFFSET(PT_r12, pt_regs, r12);
+ OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ OFFSET(PT_r58, pt_regs, r58);
+ OFFSET(PT_r59, pt_regs, r59);
+#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
+#endif
+
+ DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
+ DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
+
+ return 0;
+}
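
Kbuild compiles this file to assembly and post-processes the DEFINE()/OFFSET()
markers into <generated/asm-offsets.h>, which assembly such as ctx_sw_asm.S below
includes via asm/asm-offsets.h. A hedged sketch of the generated output (the
numeric values are hypothetical; they vary with config):

	#define TASK_THREAD_INFO 8	/* offsetof(struct task_struct, stack) */
	#define THREAD_INFO_KSP 12	/* offsetof(struct thread_info, ksp) */
	#define SZ_PT_REGS 120		/* sizeof(struct pt_regs) */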
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 0000000000..48e1f21976
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <linux/linkage.h>
+#include <asm/entry.h> /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+
+; IN
+; - r0: prev task (also current)
+; - r1: next task
+; OUT
+; - r0: prev task (so r0 not touched)
+
+ .section .sched.text,"ax",@progbits
+ENTRY_CFI(__switch_to)
+
+ /* save kernel stack frame regs of @prev task */
+ push blink
+ CFI_DEF_CFA_OFFSET 4
+ CFI_OFFSET r31, -4
+
+ push fp
+ CFI_DEF_CFA_OFFSET 8
+ CFI_OFFSET r27, -8
+
+ mov fp, sp
+ CFI_DEF_CFA_REGISTER r27
+
+ /* kernel mode callee regs of @prev */
+ SAVE_CALLEE_SAVED_KERNEL
+
+ /*
+ * save final SP to @prev->thread_info.ksp
+ * @prev is "current" so thread_info derived from SP
+ */
+ GET_CURR_THR_INFO_FROM_SP r10
+ st sp, [r10, THREAD_INFO_KSP]
+
+ /* update @next in _current_task[] and GP register caching it */
+ SET_CURR_TASK_ON_CPU r1, r10
+
+ /* load SP from @next->thread_info.ksp */
+ ld r10, [r1, TASK_THREAD_INFO]
+ ld sp, [r10, THREAD_INFO_KSP]
+
+ /* restore callee regs, stack frame regs of @next */
+ RESTORE_CALLEE_SAVED_KERNEL
+
+ pop fp
+ CFI_RESTORE r27
+ CFI_DEF_CFA r28, 4
+
+ pop blink
+ CFI_RESTORE r31
+ CFI_DEF_CFA_OFFSET 0
+
+ j [blink]
+END_CFI(__switch_to)
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 0000000000..4c9e61457b
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/mach_desc.h>
+#include <asm/serial.h>
+
+#ifdef CONFIG_SERIAL_EARLYCON
+
+static unsigned int __initdata arc_base_baud;
+
+unsigned int __init arc_early_base_baud(void)
+{
+ return arc_base_baud/16;
+}
+
+static void __init arc_set_early_base_baud(unsigned long dt_root)
+{
+ if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
+ arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
+ else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp") ||
+ of_flat_dt_is_compatible(dt_root, "snps,hsdk"))
+ arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x & HSDK) */
+ else
+ arc_base_baud = 50000000; /* Fixed default 50MHz */
+}
+#else
+#define arc_set_early_base_baud(dt_root)
+#endif
+
+static const void * __init arch_get_next_mach(const char *const **match)
+{
+ static const struct machine_desc *mdesc = __arch_info_begin;
+ const struct machine_desc *m = mdesc;
+
+ if (m >= __arch_info_end)
+ return NULL;
+
+ mdesc++;
+ *match = m->dt_compat;
+ return m;
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+const struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+ const struct machine_desc *mdesc;
+ unsigned long dt_root;
+
+ if (!early_init_dt_scan(dt))
+ return NULL;
+
+ mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
+ if (!mdesc)
+ machine_halt();
+
+ dt_root = of_get_flat_dt_root();
+ arc_set_early_base_baud(dt_root);
+
+ return mdesc;
+}
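
arch_get_next_mach() iterates the machine_desc records placed between
__arch_info_begin and __arch_info_end. Boards populate that table with the
MACHINE_START/MACHINE_END macros from asm/mach_desc.h; a hedged sketch with a
made-up board name and compatible string:

	static const char *demo_compat[] __initconst = {
		"vendor,demo-board",	/* matched against the dtb root */
		NULL,
	};

	MACHINE_START(DEMO, "demo")
		.dt_compat	= demo_compat,
	MACHINE_END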
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 0000000000..ccc7e8c39e
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_EMUL_UNALIGNED) || \
+ defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+ int fieldA = 0;
+ int fieldC = 0, fieldCisReg = 0;
+ uint16_t word1 = 0, word0 = 0;
+ int subopcode, is_linked, op_format;
+ uint16_t *ins_ptr;
+ uint16_t ins_buf[4];
+ int bytes_not_copied = 0;
+
+ memset(state, 0, sizeof(struct disasm_state));
+
+	/* This fetches the upper part of the 32 bit instruction
+	 * in both Little Endian and Big Endian configurations. */
+ if (userspace) {
+ bytes_not_copied = copy_from_user(ins_buf,
+ (const void __user *) addr, 8);
+ if (bytes_not_copied > 6)
+ goto fault;
+ ins_ptr = ins_buf;
+ } else {
+ ins_ptr = (uint16_t *) addr;
+ }
+
+ word1 = *((uint16_t *)addr);
+
+ state->major_opcode = (word1 >> 11) & 0x1F;
+
+ /* Check if the instruction is 32 bit or 16 bit instruction */
+ if (state->major_opcode < 0x0B) {
+ if (bytes_not_copied > 4)
+ goto fault;
+ state->instr_len = 4;
+ word0 = *((uint16_t *)(addr+2));
+ state->words[0] = (word1 << 16) | word0;
+ } else {
+ state->instr_len = 2;
+ state->words[0] = word1;
+ }
+
+ /* Read the second word in case of limm */
+ word1 = *((uint16_t *)(addr + state->instr_len));
+ word0 = *((uint16_t *)(addr + state->instr_len + 2));
+ state->words[1] = (word1 << 16) | word0;
+
+ switch (state->major_opcode) {
+ case op_Bcc:
+ state->is_branch = 1;
+
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 16)) ?
+ FIELD_s25(state->words[0]) :
+ FIELD_s21(state->words[0]);
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->flow = direct_jump;
+ break;
+
+ case op_BLcc:
+ if (IS_BIT(state->words[0], 16)) {
+ /* Branch and Link*/
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 17)) ?
+ (FIELD_s25(state->words[0]) & ~0x3) :
+ FIELD_s21(state->words[0]);
+
+ state->flow = direct_call;
+ } else {
+ /*Branch On Compare */
+ fieldA = FIELD_s9(state->words[0]) & ~0x3;
+ state->flow = direct_jump;
+ }
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->is_branch = 1;
+ break;
+
+ case op_LD: /* LD<zz> a,[b,s9] */
+ state->write = 0;
+ state->di = BITS(state->words[0], 11, 11);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 7, 8);
+ state->aa = BITS(state->words[0], 9, 10);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->aa = 0;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src2 = FIELD_s9(state->words[0]);
+ state->dest = FIELD_A(state->words[0]);
+ state->pref = (state->dest == REG_LIMM);
+ break;
+
+ case op_ST:
+ state->write = 1;
+ state->di = BITS(state->words[0], 5, 5);
+ if (state->di)
+ break;
+ state->aa = BITS(state->words[0], 3, 4);
+ state->zz = BITS(state->words[0], 1, 2);
+ state->src1 = FIELD_C(state->words[0]);
+ if (state->src1 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->src1, regs, cregs);
+ }
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->aa = 0;
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src3 = FIELD_s9(state->words[0]);
+ break;
+
+ case op_MAJOR_4:
+ subopcode = MINOR_OPCODE(state->words[0]);
+ switch (subopcode) {
+ case 32: /* Jcc */
+ case 33: /* Jcc.D */
+ case 34: /* JLcc */
+ case 35: /* JLcc.D */
+ is_linked = 0;
+
+ if (subopcode == 33 || subopcode == 35)
+ state->delay_slot = 1;
+
+ if (subopcode == 34 || subopcode == 35)
+ is_linked = 1;
+
+ fieldCisReg = 0;
+ op_format = BITS(state->words[0], 22, 23);
+ if (op_format == 0 || ((op_format == 3) &&
+ (!IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+
+ if (fieldC == REG_LIMM) {
+ fieldC = state->words[1];
+ state->instr_len += 4;
+ } else {
+ fieldCisReg = 1;
+ }
+ } else if (op_format == 1 || ((op_format == 3)
+ && (IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+ } else {
+ /* op_format == 2 */
+ fieldC = FIELD_s12(state->words[0]);
+ }
+
+ if (!fieldCisReg) {
+ state->target = fieldC;
+ state->flow = is_linked ?
+ direct_call : direct_jump;
+ } else {
+ state->target = get_reg(fieldC, regs, cregs);
+ state->flow = is_linked ?
+ indirect_call : indirect_jump;
+ }
+ state->is_branch = 1;
+ break;
+
+ case 40: /* LPcc */
+ if (BITS(state->words[0], 22, 23) == 3) {
+ /* Conditional LPcc u7 */
+ fieldC = FIELD_C(state->words[0]);
+
+ fieldC = fieldC << 1;
+ fieldC += (addr & ~0x03);
+ state->is_branch = 1;
+ state->flow = direct_jump;
+ state->target = fieldC;
+ }
+ /* For Unconditional lp, next pc is the fall through
+ * which is updated */
+ break;
+
+ case 48 ... 55: /* LD a,[b,c] */
+ state->di = BITS(state->words[0], 15, 15);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 16, 16);
+ state->zz = BITS(state->words[0], 17, 18);
+ state->aa = BITS(state->words[0], 22, 23);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs,
+ cregs);
+ }
+ state->src2 = FIELD_C(state->words[0]);
+ if (state->src2 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->src2, regs,
+ cregs);
+ }
+ state->dest = FIELD_A(state->words[0]);
+ if (state->dest == REG_LIMM)
+ state->pref = 1;
+ break;
+
+ case 10: /* MOV */
+ /* still need to check for limm to extract instr len */
+ /* MOV is special case because it only takes 2 args */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if (FIELD_C(state->words[0]) == REG_LIMM)
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+
+
+ default:
+ /* Not a Load, Jump or Loop instruction */
+ /* still need to check for limm to extract instr len */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM)))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+ }
+ break;
+
+ /* 16 Bit Instructions */
+ case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+ state->zz = BITS(state->words[0], 3, 4);
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->dest = FIELD_S_A(state->words[0]);
+ break;
+
+ case op_ADD_MOV_CMP:
+ /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+ if ((BITS(state->words[0], 3, 4) < 3) &&
+ (FIELD_S_H(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+
+ case op_S:
+ subopcode = BITS(state->words[0], 5, 7);
+ switch (subopcode) {
+ case 0: /* j_s */
+ case 1: /* j_s.d */
+ case 2: /* jl_s */
+ case 3: /* jl_s.d */
+ state->target = get_reg(FIELD_S_B(state->words[0]),
+ regs, cregs);
+ state->delay_slot = subopcode & 1;
+ state->flow = (subopcode >= 2) ?
+ direct_call : indirect_jump;
+ break;
+ case 7:
+ switch (BITS(state->words[0], 8, 10)) {
+ case 4: /* jeq_s [blink] */
+ case 5: /* jne_s [blink] */
+ case 6: /* j_s [blink] */
+ case 7: /* j_s.d [blink] */
+ state->delay_slot = (subopcode == 7);
+ state->flow = indirect_jump;
+ state->target = get_reg(31, regs, cregs);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case op_LD_S: /* LD_S c, [b, u7] */
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_LDB_S:
+ case op_STB_S:
+ /* no further handling required as byte accesses should not
+ * cause an unaligned access exception */
+ state->zz = 1;
+ break;
+
+ case op_LDWX_S: /* LDWX_S c, [b, u6] */
+ state->x = 1;
+ fallthrough;
+
+ case op_LDW_S: /* LDW_S c, [b, u6] */
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u6(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_ST_S: /* ST_S c, [b, u7] */
+ state->write = 1;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ break;
+
+ case op_STW_S: /* STW_S c,[b,u6] */
+ state->write = 1;
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u6(state->words[0]);
+ break;
+
+ case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+ /* note: we are ignoring possibility of:
+ * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+ * cause unaligned exception anyway */
+ state->write = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 5, 5);
+ if (state->zz)
+ break; /* byte accesses should not come here */
+ if (!state->write) {
+ state->src1 = get_reg(28, regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ } else {
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+ cregs);
+ state->src2 = get_reg(28, regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ }
+ break;
+
+ case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+ /* note: ADD_S r0, gp, s11 is ignored */
+ state->zz = BITS(state->words[0], 9, 10);
+ state->src1 = get_reg(26, regs, cregs);
+ state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+ FIELD_S_s11(state->words[0]);
+ state->dest = 0;
+ break;
+
+ case op_Pcl: /* LD_S b,[pcl,u10] */
+ state->src1 = regs->ret & ~3;
+ state->src2 = FIELD_S_u10(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ break;
+
+ case op_BR_S:
+ state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_B_S:
+ fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+ FIELD_S_s7(state->words[0]) :
+ FIELD_S_s10(state->words[0]);
+ state->target = fieldA + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_BL_S:
+ state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_call;
+ state->is_branch = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bytes_not_copied <= (8 - state->instr_len))
+ return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+#if defined(CONFIG_ISA_ARCOMPACT)
+ if (reg <= 12) {
+ p = &regs->r0;
+ return p[-reg];
+ }
+#else /* CONFIG_ISA_ARCV2 */
+ if (reg <= 11) {
+ p = &regs->r0;
+ return p[reg];
+ }
+
+ if (reg == 12)
+ return regs->r12;
+ if (reg == 30)
+ return regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ if (reg == 58)
+ return regs->r58;
+ if (reg == 59)
+ return regs->r59;
+#endif
+#endif
+ if (cregs && (reg <= 25)) {
+ p = &cregs->r13;
+ return p[13 - reg];
+ }
+
+ if (reg == 26)
+ return regs->r26;
+ if (reg == 27)
+ return regs->fp;
+ if (reg == 28)
+ return regs->sp;
+ if (reg == 31)
+ return regs->blink;
+
+ return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+#if defined(CONFIG_ISA_ARCOMPACT)
+ switch (reg) {
+ case 0 ... 12:
+ p = &regs->r0;
+ p[-reg] = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13 - reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+ default:
+ break;
+ }
+#else /* CONFIG_ISA_ARCV2 */
+ switch (reg) {
+ case 0 ... 11:
+ p = &regs->r0;
+ p[reg] = val;
+ break;
+ case 12:
+ regs->r12 = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13 - reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 30:
+ regs->r30 = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ case 58:
+ regs->r58 = val;
+ break;
+ case 59:
+ regs->r59 = val;
+ break;
+#endif
+ default:
+ break;
+ }
+#endif
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
+ * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
+ *
+ * If @pc is a branch
+ * -@tgt_if_br is set to branch target.
+ * -If branch has delay slot, @next_pc updated with actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+ struct callee_regs *cregs,
+ unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+ struct disasm_state instr;
+
+ disasm_instr(pc, &instr, 0, regs, cregs);
+
+ *next_pc = pc + instr.instr_len;
+
+	/* Instructions with two possible targets: branch, jump and loop */
+ if (instr.is_branch)
+ *tgt_if_br = instr.target;
+
+ /* For the instructions with delay slots, the fall through is the
+ * instruction following the instruction in delay slot.
+ */
+ if (instr.delay_slot) {
+ struct disasm_state instr_d;
+
+ disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
+
+ *next_pc += instr_d.instr_len;
+ }
+
+ /* Zero Overhead Loop - end of the loop */
+ if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
+ && (regs->lp_count > 1)) {
+ *next_pc = regs->lp_start;
+ }
+
+ return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_ARC_EMUL_UNALIGNED || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
new file mode 100644
index 0000000000..2e49c81c80
--- /dev/null
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ARCv2 ISA based core Low Level Intr/Traps/Exceptions(non-TLB) Handling
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>	/* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+#include <asm/mmu.h>
+
+; The maximum number of interrupts supported by the core interrupt controller.
+; This number is not equal to the maximum interrupt number (256) because the
+; first 16 lines are reserved for exceptions and are not configurable.
+#define NR_CPU_IRQS 240
+
+ .cpu HS
+
+#define VECTOR .word
+
+;############################ Vector Table #################################
+
+ .section .vector,"a",@progbits
+ .align 4
+
+# Initial 16 slots are Exception Vectors
+VECTOR res_service ; Reset Vector
+VECTOR mem_service ; Mem exception
+VECTOR instr_service ; Instrn Error
+VECTOR EV_MachineCheck ; Fatal Machine check
+VECTOR EV_TLBMissI ; Instruction TLB miss
+VECTOR EV_TLBMissD ; Data TLB miss
+VECTOR EV_TLBProtV ; Protection Violation
+VECTOR EV_PrivilegeV ; Privilege Violation
+VECTOR EV_SWI ; Software Breakpoint
+VECTOR EV_Trap ; Trap exception
+VECTOR EV_Extension ; Extn Instruction Exception
+VECTOR EV_DivZero ; Divide by Zero
+VECTOR EV_DCError ; Data Cache Error
+VECTOR EV_Misaligned ; Misaligned Data Access
+VECTOR reserved ; Reserved slots
+VECTOR reserved ; Reserved slots
+
+# Begin Interrupt Vectors
+VECTOR handle_interrupt ; (16) Timer0
+VECTOR handle_interrupt ; unused (Timer1)
+VECTOR handle_interrupt ; unused (WDT)
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
+
+.rept NR_CPU_IRQS - 8
+ VECTOR handle_interrupt
+.endr
+
+ .section .text, "ax",@progbits
+
+reserved:
+ flag 1 ; Unexpected event, halt
+
+;##################### Interrupt Handling ##############################
+
+ENTRY(handle_interrupt)
+
+ INTERRUPT_PROLOGUE
+
+ # irq control APIs local_irq_save/restore/disable/enable fiddle with
+ # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
+	# However a taken interrupt doesn't clear these bits. Thus irqs_disabled()
+	# query in hard ISR path would return false (since .IE is set) which would
+	# trip genirq interrupt handling asserts.
+	#
+	# So do a "soft" disable of interrupts here.
+	#
+	# Note this disable is only for consistent book-keeping as further interrupts
+	# will be disabled anyways even w/o this. Hardware tracks active interrupts
+	# separately in AUX_IRQ_ACT.active and will not take new interrupts
+	# unless this one returns (or higher prio becomes pending in 2-prio scheme)
+
+ IRQ_DISABLE
+
+ ; icause is banked: one per priority level
+ ; so a higher prio interrupt taken here won't clobber prev prio icause
+ lr r0, [ICAUSE]
+ mov blink, ret_from_exception
+
+ b.d arch_do_IRQ
+ mov r1, sp
+
+END(handle_interrupt)
+
+;################### Non TLB Exception Handling #############################
+
+ENTRY(EV_SWI)
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
+END(EV_SWI)
+
+ENTRY(EV_DivZero)
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
+END(EV_DivZero)
+
+ENTRY(EV_DCError)
+ ; TODO: implement this
+ EXCEPTION_PROLOGUE
+ b ret_from_exception
+END(EV_DCError)
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; - Unlike ARCompact, handles Bus errors for both User/Kernel mode,
+; Instruction fetch or Data access, under a single Exception Vector
+; ---------------------------------------------
+
+ENTRY(mem_service)
+
+ EXCEPTION_PROLOGUE
+
+ bl do_memory_error
+ b ret_from_exception
+END(mem_service)
+
+ENTRY(EV_Misaligned)
+
+ EXCEPTION_PROLOGUE
+
+ SAVE_CALLEE_SAVED_USER
+ mov r2, sp ; callee_regs
+
+ bl do_misaligned_access
+
+ ; TBD: optimize - do this only if a callee reg was involved
+ ; either a dst of emulated LD/ST or src with address-writeback
+ RESTORE_CALLEE_SAVED_USER
+
+ b ret_from_exception
+END(EV_Misaligned)
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ENTRY(EV_TLBProtV)
+
+ EXCEPTION_PROLOGUE
+
+ mov blink, ret_from_exception
+ b do_page_fault
+
+END(EV_TLBProtV)
+
+; From Linux standpoint Slow Path I/D TLB Miss is same as ProtV as they
+; need to call do_page_fault().
+; ECR in pt_regs provides whether access was R/W/X
+
+.global call_do_page_fault
+.set call_do_page_fault, EV_TLBProtV
+
+;############# Common Handlers for ARCompact and ARCv2 ##############
+
+#include "entry.S"
+
+;############# Return from Intr/Excp/Trap (ARCv2 ISA Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ should definitely not happen between now and rtie
+; Both entry points to here already disable interrupts
+
+.Lrestore_regs:
+restore_regs:
+
+	# Interrupts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
+ TRACE_ASM_IRQ_ENABLE
+
+ ld r0, [sp, PT_status32] ; U/K mode at time of entry
+ lr r10, [AUX_IRQ_ACT]
+
+ bmsk r11, r10, 15 ; extract AUX_IRQ_ACT.active
+ breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception
+
+;####### Return from Intr #######
+
+.Lisr_ret:
+
+debug_marker_l1:
+ ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ btst r0, STATUS_DE_BIT ; Z flag set if bit clear
+ bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
+
+ ; Handle special case #1: (Entry via Exception, Return via IRQ)
+ ;
+ ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig
+ ; task now returning to U mode (riding the Intr)
+ ; AUX_IRQ_ACTIVE won't have U bit set (since intr in K mode), hence SP
+ ; won't be switched to correct U mode value (from AUX_SP)
+ ; So force AUX_IRQ_ACT.U for such a case
+
+ btst r0, STATUS_U_BIT ; Z flag set if K (Z clear for U)
+ bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U
+ sr r11, [AUX_IRQ_ACT]
+
+ INTERRUPT_EPILOGUE
+ rtie
+
+;####### Return from Exception / pure kernel mode #######
+
+.Lexcept_ret: ; Expects r0 has PT_status32
+
+debug_marker_syscall:
+ EXCEPTION_EPILOGUE
+ rtie
+
+;####### Return from Intr to insn in delay slot #######
+
+; Handle special case #2: (Entry via Exception in Delay Slot, Return via IRQ)
+;
+; Intr returning to a Delay Slot (DS) insn
+; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+; entry was via Exception in DS which got preempted in kernel).
+;
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is to drop out of interrupt context into pure kernel mode
+; and return from pure kernel mode which does right things for delay slot
+
+.Lintr_ret_to_delay_slot:
+debug_marker_ds:
+
+ ld r2, [@intr_to_DE_cnt]
+ add r2, r2, 1
+ st r2, [@intr_to_DE_cnt]
+
+ ; drop out of interrupt context (clear AUX_IRQ_ACT.active)
+ bmskn r11, r10, 15
+ sr r11, [AUX_IRQ_ACT]
+ b .Lexcept_ret
+
+END(ret_from_exception)
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
new file mode 100644
index 0000000000..774c03cc1d
--- /dev/null
+++ b/arch/arc/kernel/entry-compact.S
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARCompact ISA
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: May 2011
+ * -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ * -traced syscall return code (r0) was not saved into pt_regs for restoring
+ * into user reg-file when traced task returns to user space.
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ * were not invoking post-syscall trace hook (jumping directly into
+ * ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ * -To maintain the slot size of 8 bytes/vector, added nop, which is
+ * not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() invoked upon TIF_RESTORE_SIGMASK as well
+ * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ * need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ * active (AE bit enabled). This causes a double fault for a subseq valid
+ * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ * Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
+ * setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ * - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ * Minor Surgery of Low Level ISR to make it SMP safe
+ * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ * - _current_task is made an array of NR_CPUS
+ * - Access of _current_task wrapped inside a macro so that if hardware
+ * team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/linkage.h> /* {ENTRY,EXIT} */
+#include <asm/entry.h>
+#include <asm/irqflags.h>
+
+ .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR lbl
+#if 1 /* Just in case, build breaks */
+ j \lbl
+#else
+ b \lbl
+ nop
+#endif
+.endm
+
+ .section .vector, "ax",@progbits
+ .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR res_service ; 0x0, Reset Vector (0x0)
+VECTOR mem_service ; 0x8, Mem exception (0x1)
+VECTOR instr_service ; 0x10, Instrn Error (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+.rept 28
+VECTOR handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
+VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
+VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
+VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
+ ; or Misaligned Access
+VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
+VECTOR EV_Trap ; 0x128, Trap exception (0x25)
+VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
+
+.rept 24
+VECTOR reserved ; Reserved Exceptions
+.endr
+
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+ .align 32
+ .type int1_saved_reg, @object
+ .size int1_saved_reg, 4
+int1_saved_reg:
+ .zero 4
+
+/* Each Interrupt level needs its own scratch */
+ARCFP_DATA int2_saved_reg
+ .type int2_saved_reg, @object
+ .size int2_saved_reg, 4
+int2_saved_reg:
+ .zero 4
+
+; ---------------------------------------------
+ .section .text, "ax",@progbits
+
+
+reserved:
+ flag 1 ; Unexpected event, halt
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+; Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ENTRY(handle_interrupt_level2)
+
+ INTERRUPT_PROLOGUE 2
+
+ ;------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;
+ ; This is to avoid a potential L1-L2-L1 scenario
+ ; -L1 IRQ taken
+ ; -L2 interrupts L1 (before L1 ISR could run)
+ ; -preemption off IRQ, user task in syscall picked to run
+ ; -RTIE to userspace
+ ; Returns from L2 context fine
+ ; But both L1 and L2 re-enabled, so another L1 can be taken
+ ; while prev L1 is still unserviced
+ ;
+ ;------------------------------------------------------
+
+ ; L2 interrupting L1 implies both L2 and L1 active
+ ; However both A2 and A1 are NOT set in STATUS32, thus
+ ; need to check STATUS32_L2 to determine if L1 was active
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
+
+ ; bump thread_info->preempt_count (Disable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+ add r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+ ;------------------------------------------------------
+ ; setup params for Linux common ISR and invoke it
+ ;------------------------------------------------------
+ lr r0, [icause2]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x2
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+
+END(handle_interrupt_level2)
+
+#endif
+
+; ---------------------------------------------
+; User Mode Memory Bus Error Interrupt Handler
+; (Kernel mode memory errors handled via separate exception vectors)
+; ---------------------------------------------
+ENTRY(mem_service)
+
+ INTERRUPT_PROLOGUE 2
+
+ mov r0, ilink2
+ mov r1, sp
+
+ ; User process needs to be killed with SIGBUS, but first need to get
+ ; out of the L2 interrupt context (drop to pure kernel mode) and jump
+ ; off to "C" code where SIGBUS in enqueued
+ lr r3, [status32]
+ bclr r3, r3, STATUS_A2_BIT
+ or r3, r3, (STATUS_E1_MASK|STATUS_E2_MASK)
+ sr r3, [status32_l2]
+ mov ilink2, 1f
+ rtie
+1:
+ bl do_memory_error
+ b ret_from_exception
+END(mem_service)
+
+; ---------------------------------------------
+; Level 1 ISR
+; ---------------------------------------------
+ENTRY(handle_interrupt_level1)
+
+ INTERRUPT_PROLOGUE 1
+
+ lr r0, [icause1]
+ and r0, r0, 0x1f
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ; icause1 needs to be read early, before calling tracing, which
+ ; can clobber scratch regs, hence use of stack to stash it
+ push r0
+ TRACE_ASM_IRQ_DISABLE
+ pop r0
+#endif
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x1
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+END(handle_interrupt_level1)
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ENTRY(EV_TLBProtV)
+
+ EXCEPTION_PROLOGUE ; ECR returned in r10
+
+ ;------ (5) Type of Protection Violation? ----------
+ ;
+ ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+ ; -Access Violation : 00_23_(00|01|02|03)_00
+ ; x r w r+w
+ ; -Unaligned Access : 00_23_04_00
+ bbit1 r10, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
+
+ ;========= (6a) Access Violation Processing ========
+ bl do_page_fault
+ b ret_from_exception
+
+ ;========== (6b) Non aligned access ============
+4:
+
+ SAVE_CALLEE_SAVED_USER
+ mov r2, sp ; callee_regs
+
+ bl do_misaligned_access
+
+ ; TBD: optimize - do this only if a callee reg was involved
+ ; either a dst of emulated LD/ST or src with address-writeback
+ RESTORE_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+END(EV_TLBProtV)
+
+; Wrapper for Linux page fault handler called from EV_TLBMiss*
+; Very similar to ProtV handler case (6a) above, but avoids the extra checks
+; for Misaligned access
+;
+ENTRY(call_do_page_fault)
+
+ EXCEPTION_PROLOGUE
+
+ mov blink, ret_from_exception
+ b do_page_fault
+
+END(call_do_page_fault)
+
+;############# Common Handlers for ARCompact and ARCv2 ##############
+
+#include "entry.S"
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ should definitely not happen between now and rtie
+; Both entry points to here already disable interrupts
+
+.Lrestore_regs:
+
+ # Interrupts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
+ TRACE_ASM_IRQ_ENABLE
+
+ lr r10, [status32]
+
+ ; Restore REG File. In case multiple Events outstanding,
+ ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+ ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+ ; decide that.
+
+ and.f 0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
+ bz .Lexcep_or_pure_K_ret
+
+ ; Returning from Interrupts (Level 1 or 2)
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ ; Level 2 interrupt return Path - from hardware standpoint
+ bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
+
+ ;------------------------------------------------------------------
+ ; However the context returning might not have taken L2 intr itself
+ ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
+ ; Special considerations needed for the context which took L2 intr
+
+ ld r9, [sp, PT_event] ; Ensure this is L2 intr context
+ brne r9, event_IRQ2, 149f
+
+ ;------------------------------------------------------------------
+ ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
+ ; so that sched doesn't move to new task, causing L1 to be delayed
+ ; non-deterministically. Now that we've achieved that, let's reset
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
+
+ ; decrement thread_info->preempt_count (re-enable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+ ; paranoid check, given A1 was active when A2 happened, preempt count
+ ; must not be 0 because we would have incremented it.
+ ; If this does happen we simply HALT as it means a BUG !!!
+ cmp r9, 0
+ bnz 2f
+ flag 1
+
+2:
+ sub r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+ INTERRUPT_EPILOGUE 2 ; return from level 2 interrupt
+debug_marker_l2:
+ rtie
+
+not_level2_interrupt:
+
+#endif
+
+ INTERRUPT_EPILOGUE 1 ; return from level 1 interrupt
+debug_marker_l1:
+ rtie
+
+.Lexcep_or_pure_K_ret:
+
+ ; this case is for syscalls or Exceptions or pure kernel mode
+
+ EXCEPTION_EPILOGUE
+debug_marker_syscall:
+ rtie
+
+END(ret_from_exception)
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 0000000000..089f668051
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ * (included from entry-<isa>.S)
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*------------------------------------------------------------------
+ * Function ABI
+ *------------------------------------------------------------------
+ *
+ * Arguments r0 - r7
+ * Caller Saved Registers r0 - r12
+ * Callee Saved Registers r13 - r25
+ * Global Pointer (gp) r26
+ * Frame Pointer (fp) r27
+ * Stack Pointer (sp) r28
+ * Branch link register (blink) r31
+ *------------------------------------------------------------------
+ */
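+
+; Illustration (assuming the standard ARC calling convention): for a call
+; such as foo(a, b), 'a' arrives in r0 and 'b' in r1, the result comes back
+; in r0, and blink holds the return address. Since r13-r25 survive any "C"
+; call, normal kernel entry only saves the scratch regs r0-r12 into pt_regs;
+; the callee regs are saved explicitly only when someone (ptrace, coredump,
+; clone) needs to see the full register file.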
+
+;################### Special Sys Call Wrappers ##########################
+
+ENTRY(sys_clone_wrapper)
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ and.f 0, r10, _TIF_SYSCALL_WORK
+ bnz tracesys_exit
+
+ b .Lret_from_system_call
+END(sys_clone_wrapper)
+
+ENTRY(sys_clone3_wrapper)
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone3
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ and.f 0, r10, _TIF_SYSCALL_WORK
+ bnz tracesys_exit
+
+ b .Lret_from_system_call
+END(sys_clone3_wrapper)
+
+ENTRY(ret_from_fork)
+ ; when the forked child comes here from the __switch_to function
+ ; r0 has the last task pointer.
+ ; put last task in scheduler queue
+ jl @schedule_tail
+
+ ld r9, [sp, PT_status32]
+ brne r9, 0, 1f
+
+ jl.d [r14] ; kernel thread entry point
+ mov r0, r13 ; (see PF_KTHREAD block in copy_thread)
+
+1:
+ ; Return to user space
+ ; 1. Any forked task (Reach here via BRne above)
+ ; 2. First ever init task (Reach here via return from JL above)
+ ; This is the historic "kernel_execve" use-case, to return to init
+ ; user mode, in a round about way since that is always done from
+ ; a kernel thread which is executed via JL above but always returns
+ ; out whenever kernel_execve (now inline do_fork()) is involved
+ b ret_from_exception
+END(ret_from_fork)
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ENTRY(instr_service)
+
+ EXCEPTION_PROLOGUE
+
+ bl do_insterror_or_kprobe
+ b ret_from_exception
+END(instr_service)
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ENTRY(EV_MachineCheck)
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; ECR returned in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ ; MC exceptions disable the MMU
+ ARC_MMU_REENABLE r3
+
+ lsr r3, r10, 8
+ bmsk r3, r3, 7
+ brne r3, ECR_C_MCHK_DUP_TLB, 1f
+
+ bl do_tlb_overlap_fault
+ b ret_from_exception
+
+1:
+ ; DEAD END: can't do much, display Regs and HALT
+ SAVE_CALLEE_SAVED_USER
+
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ j do_machine_check_fault
+
+END(EV_MachineCheck)
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ENTRY(EV_PrivilegeV)
+
+ EXCEPTION_PROLOGUE
+
+ bl do_privilege_fault
+ b ret_from_exception
+END(EV_PrivilegeV)
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ENTRY(EV_Extension)
+
+ EXCEPTION_PROLOGUE
+
+ bl do_extension_fault
+ b ret_from_exception
+END(EV_Extension)
+
+;################ Trap Handling (Syscall, Breakpoint) ##################
+
+; ---------------------------------------------
+; syscall Tracing
+; ---------------------------------------------
+tracesys:
+ ; safekeep EFA (r12) if syscall tracer wanted PC
+ ; for traps, ERET is pre-commit so points to next-PC
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
+
+ ; PRE syscall trace hook
+ mov r0, sp ; pt_regs
+ bl @syscall_trace_enter
+
+ ; Tracing code now returns the syscall num (orig or modif)
+ mov r8, r0
+
+ ; Do the Sys Call as we normally would.
+ cmp r8, NR_syscalls - 1
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+ ; Restore the sys-call args. Mere invocation of the hook above could have
+ ; clobbered them (since they are in scratch regs). The tracer could also
+ ; have deliberately changed the syscall args: r0-r7
+ ld r0, [sp, PT_r0]
+ ld r1, [sp, PT_r1]
+ ld r2, [sp, PT_r2]
+ ld r3, [sp, PT_r3]
+ ld r4, [sp, PT_r4]
+ ld r5, [sp, PT_r5]
+ ld r6, [sp, PT_r6]
+ ld r7, [sp, PT_r7]
+ ld.as r9, [sys_call_table, r8]
+ jl [r9]
+
+tracesys_exit:
+ st r0, [sp, PT_r0]
+
+ ; POST syscall trace hook
+ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_exit
+
+ ; don't call ret_from_system_call as it saves r0, already done above
+ b ret_from_exception
+
+; ---------------------------------------------
+; Breakpoint TRAP
+; ---------------------------------------------
+trap_with_param:
+ mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
+ mov r1, sp ; pt_regs
+
+ ; save callee regs in case tracer/gdb wants to peek
+ SAVE_CALLEE_SAVED_USER
+
+ ; safekeep ref to callee regs
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ ; call the non syscall trap handler
+ bl do_non_swi_trap
+
+ ; unwind stack to discard callee regs
+ DISCARD_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+; ---------------------------------------------
+; syscall TRAP
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
+; ---------------------------------------------
+
+ENTRY(EV_Trap)
+
+ EXCEPTION_PROLOGUE_KEEP_AE
+
+ lr r12, [efa]
+
+ FAKE_RET_FROM_EXCPN
+
+ ;============ TRAP N : breakpoints, kprobes etc
+ bmsk.f 0, r10, 7
+ bnz trap_with_param
+
+ ;============ TRAP 0 (no param): syscall
+
+ ; syscall tracing ongoing, invoke pre-post-hooks around syscall
+ GET_CURR_THR_INFO_FLAGS r10
+ and.f 0, r10, _TIF_SYSCALL_WORK
+ bnz tracesys ; this never comes back
+
+ ;============ Normal syscall case
+
+ cmp r8, NR_syscalls - 1
+ mov.hi r0, -ENOSYS
+ bhi .Lret_from_system_call
+
+ ld.as r9,[sys_call_table, r8]
+ jl [r9]
+
+.Lret_from_system_call:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ; fall through to ret_from_exception
+END(EV_Trap)
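+
+; Illustration (a sketch, not part of this file, assuming the usual ARC
+; userspace convention of args in r0-r7 and syscall number in r8): a
+; "write" syscall would enter EV_Trap above roughly as
+;
+;	mov	r0, 1		; arg0: fd = stdout
+;	mov	r1, buf		; arg1: buffer (hypothetical symbol)
+;	mov	r2, len		; arg2: count (hypothetical symbol)
+;	mov	r8, __NR_write	; syscall number, checked against NR_syscalls
+;	trap0			; TRAP 0 (no param) -> syscall path above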
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If ret to user mode do we need to handle signals, schedule() et al.
+
+ENTRY(ret_from_exception)
+
+ ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+ ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
+
+ bbit0 r8, STATUS_U_BIT, resume_kernel_mode
+
+ ; Before returning to User mode check-for-and-complete any pending work
+ ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+ ; Disable IRQs to ensure that the check for pending work itself is atomic
+ ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+ ; interim IRQ).
+ IRQ_DISABLE r10
+
+ ; Fast Path return to user mode if no pending work
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_WORK_MASK
+ bz .Lrestore_regs
+
+ ; --- (Slow Path #1) task preemption ---
+ bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+ mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
+ j @schedule ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+ IRQ_ENABLE r10
+
+ ; --- (Slow Path #2) pending signal ---
+ mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
+ bz .Lchk_notify_resume
+
+ ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+ ; in pt_reg since the "C" ABI (kernel code) will automatically
+ ; save/restore callee-saved regs.
+ ;
+ ; However, here we need to explicitly save callee regs because
+ ; (i) If this signal causes coredump - full regfile needed
+ ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+ ; tracer might call PEEKUSR(CALLEE reg)
+ ;
+ ; NOTE: SP will grow by the size of the CALLEE Reg-File
+ SAVE_CALLEE_SAVED_USER
+
+ ; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ bl @do_signal
+
+ ; Ideally we want to discard the Callee reg above, however if this was
+ ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+ RESTORE_CALLEE_SAVED_USER
+
+ b resume_user_mode_begin ; loop back to start of U mode ret
+
+ ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+ btst r9, TIF_NOTIFY_RESUME
+ blnz @do_notify_resume
+ b resume_user_mode_begin ; unconditionally back to U mode ret chks
+ ; for single exit point from this block
+
+resume_kernel_mode:
+
+ ; Disable Interrupts from this point on
+ ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
+ ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
+ IRQ_DISABLE r9
+
+#ifdef CONFIG_PREEMPTION
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+ brne r8, 0, .Lrestore_regs
+
+ ; check if this task's NEED_RESCHED flag set
+ ld r9, [r10, THREAD_INFO_FLAGS]
+ bbit0 r9, TIF_NEED_RESCHED, .Lrestore_regs
+
+ ; Invoke PREEMPTION
+ jl preempt_schedule_irq
+
+ ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+ b .Lrestore_regs
+
+##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S
+
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 0000000000..ec640219d9
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/sched.h>
+#include <asm/fpu.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ * mov_s r3, 0
+ * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
+ * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
+ * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+ unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+ const unsigned int zero = 0;
+
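+ /*
+ * Per the scheme described above: daddh11 copies dpfp1's high word
+ * into saveto[1] without touching dpfp1, then dexcl1 writes the
+ * incoming task's pair into dpfp1 while handing the old low word
+ * back in saveto[0] - save of prev and restore of next in one shot.
+ */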
+ __asm__ __volatile__(
+ "daddh11 %0, %2, %2\n"
+ "dexcl1 %1, %3, %4\n"
+ : "=&r" (*(saveto + 1)), /* early clobber must here */
+ "=&r" (*(saveto))
+ : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+ );
+
+ __asm__ __volatile__(
+ "daddh22 %0, %2, %2\n"
+ "dexcl2 %1, %3, %4\n"
+ : "=&r"(*(saveto + 3)), /* early clobber must here */
+ "=&r"(*(saveto + 2))
+ : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+ );
+}
+
+#else
+
+void fpu_init_task(struct pt_regs *regs)
+{
+ const unsigned int fwe = 0x80000000;
+
+ /* default rounding mode */
+ write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
+
+ /* Initialize to zero: setting requires FWE be set */
+ write_aux_reg(ARC_REG_FPU_STATUS, fwe);
+}
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ struct arc_fpu *save = &prev->thread.fpu;
+ struct arc_fpu *restore = &next->thread.fpu;
+ const unsigned int fwe = 0x80000000;
+
+ save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
+ save->status = read_aux_reg(ARC_REG_FPU_STATUS);
+
+ write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
+ write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
+}
+
+#endif
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 0000000000..9152782444
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: Dec 2007
+ * -Check if we are running on Simulator or on real hardware
+ * to skip certain things during boot on simulator
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/dsp-impl.h>
+#include <asm/irqflags.h>
+
+.macro CPU_EARLY_SETUP
+
+ ; Setting up Vector Table (in case an exception happens in early boot)
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
+ ; Disable I-cache/D-cache if kernel so configured
+ lr r5, [ARC_REG_IC_BCR]
+ breq r5, 0, 1f ; I$ doesn't exist
+ lr r5, [ARC_REG_IC_CTRL]
+#ifdef CONFIG_ARC_HAS_ICACHE
+ bclr r5, r5, 0 ; 0 = Enable, 1 = Disable
+#else
+ bset r5, r5, 0 ; I$ exists, but is not used
+#endif
+ sr r5, [ARC_REG_IC_CTRL]
+
+1:
+ lr r5, [ARC_REG_DC_BCR]
+ breq r5, 0, 1f ; D$ doesn't exist
+ lr r5, [ARC_REG_DC_CTRL]
+ bclr r5, r5, 6 ; Invalidate (discard w/o wback)
+#ifdef CONFIG_ARC_HAS_DCACHE
+ bclr r5, r5, 0 ; Enable (+Inv)
+#else
+ bset r5, r5, 0 ; Disable (+Inv)
+#endif
+ sr r5, [ARC_REG_DC_CTRL]
+
+1:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Unaligned access is disabled at reset, so re-enable early as
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+ ; by default
+ lr r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+ bset r5, r5, STATUS_AD_BIT
+#else
+ ; Although disabled at reset, bootloader might have enabled it
+ bclr r5, r5, STATUS_AD_BIT
+#endif
+ kflag r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+ lr r5, [ARC_REG_LPB_BUILD]
+ breq r5, 0, 1f ; LPB doesn't exist
+ mov r5, 1
+ sr r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
+
+ /* On HSDK, CCMs need to be remapped super early */
+#ifdef CONFIG_ARC_SOC_HSDK
+ mov r6, 0x60000000
+ lr r5, [ARC_REG_ICCM_BUILD]
+ breq r5, 0, 1f
+ sr r6, [ARC_REG_AUX_ICCM]
+1:
+ lr r5, [ARC_REG_DCCM_BUILD]
+ breq r5, 0, 2f
+ sr r6, [ARC_REG_AUX_DCCM]
+2:
+#endif /* CONFIG_ARC_SOC_HSDK */
+
+#endif /* CONFIG_ISA_ARCV2 */
+
+ ; Config DSP_CTRL properly, so kernel may use integer multiply,
+ ; multiply-accumulate, and divide operations
+ DSP_EARLY_INIT
+.endm
+
+ .section .init.text, "ax",@progbits
+
+;----------------------------------------------------------------
+; Default Reset Handler (jumped into from Reset vector)
+; - Don't clobber r0,r1,r2 as they might have u-boot provided args
+; - Platforms can override this weak version if needed
+;----------------------------------------------------------------
+WEAK(res_service)
+ j stext
+END(res_service)
+
+;----------------------------------------------------------------
+; Kernel Entry point
+;----------------------------------------------------------------
+ENTRY(stext)
+
+ CPU_EARLY_SETUP
+
+#ifdef CONFIG_SMP
+ GET_CPU_ID r5
+ cmp r5, 0
+ mov.nz r0, r5
+ bz .Lmaster_proceed
+
+ ; Non-Masters wait for Master to boot enough and bring them up
+ ; when they resume, tail-call to entry point
+ mov blink, @first_lines_of_secondary
+ j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
+#endif
+
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
+ mov r5, __bss_start
+ sub r6, __bss_stop, r5
+ lsr.f lp_count, r6, 2
+ lpnz 1f
+ st.ab 0, [r5, 4]
+1:
+
+ ; Uboot - kernel ABI
+ ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
+ ; r1 = magic number (always zero as of now)
+ ; r2 = pointer to uboot provided cmdline or external DTB in mem
+ ; These are handled later in handle_uboot_args()
+ st r0, [@uboot_tag]
+ st r1, [@uboot_magic]
+ st r2, [@uboot_arg]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+ mov r9, @init_task
+ SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; tsk->thread_info is really a PAGE, whose bottom hosts the stack
+ GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
+
+ j start_kernel ; "C" entry point
+END(stext)
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+; First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+ .section .text, "ax",@progbits
+ENTRY(first_lines_of_secondary)
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+ SET_CURR_TASK_ON_CPU r0, r1
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; set its stack base to tsk->thread_info bottom
+ GET_TSK_STACK_BASE r0, sp
+
+ j start_kernel_secondary
+END(first_lines_of_secondary)
+#endif
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
new file mode 100644
index 0000000000..678898757e
--- /dev/null
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <asm/irq.h>
+
+#define NR_EXCEPTIONS 16
+
+struct bcr_irq_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+ unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+};
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC Core)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ */
+void arc_init_IRQ(void)
+{
+ unsigned int tmp, irq_prio, i;
+ struct bcr_irq_arcv2 irq_bcr;
+
+ struct aux_irq_ctrl {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res3:18, save_idx_regs:1, res2:1,
+ save_u_to_u:1, save_lp_regs:1, save_blink:1,
+ res:4, save_nr_gpr_pairs:5;
+#else
+ unsigned int save_nr_gpr_pairs:5, res:4,
+ save_blink:1, save_lp_regs:1, save_u_to_u:1,
+ res2:1, save_idx_regs:1, res3:18;
+#endif
+ } ictrl;
+
+ *(unsigned int *)&ictrl = 0;
+
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
+ ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
+ ictrl.save_blink = 1;
+ ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
+ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
+ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
+#endif
+
+ WRITE_AUX(AUX_IRQ_CTRL, ictrl);
+
+ /*
+ * ARCv2 core intc provides multiple interrupt priorities (up to 16).
+ * Typical builds though have only two levels (0-high, 1-low)
+ * Linux by default uses lower prio 1 for most irqs, reserving 0 for
+ * NMI style interrupts in future (say perf)
+ */
+
+ READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+ irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
+ pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+ irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
+ irq_bcr.firq ? " FIRQ (not used)":"");
+
+ /*
+ * Set a default priority for all available interrupts to prevent
+ * switching of register banks if Fast IRQ and multiple register banks
+ * are supported by CPU.
+ * Also disable private-per-core IRQ lines so faulty external HW won't
+ * trigger interrupt that kernel is not ready to handle.
+ */
+ for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
+ write_aux_reg(AUX_IRQ_SELECT, i);
+ write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+
+ /*
+ * Only mask cpu private IRQs here.
+ * "common" interrupts are masked at IDU, otherwise it would
+ * need to be unmasked at each cpu, with IPIs
+ */
+ if (i < FIRST_EXT_IRQ)
+ write_aux_reg(AUX_IRQ_ENABLE, 0);
+ }
+
+ /* setup status32, don't enable intr yet as kernel doesn't want */
+ tmp = read_aux_reg(ARC_REG_STATUS32);
+ tmp |= ARCV2_IRQ_DEF_PRIO << 1;
+ tmp &= ~STATUS_IE_MASK;
+ asm volatile("kflag %0 \n"::"r"(tmp));
+}
+
+static void arcv2_irq_mask(struct irq_data *data)
+{
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
+ write_aux_reg(AUX_IRQ_ENABLE, 0);
+}
+
+static void arcv2_irq_unmask(struct irq_data *data)
+{
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
+ write_aux_reg(AUX_IRQ_ENABLE, 1);
+}
+
+static void arcv2_irq_enable(struct irq_data *data)
+{
+ /* set default priority */
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
+ write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+
+ /*
+ * hw auto enables (linux unmask) all by default
+ * So no need to do IRQ_ENABLE here
+ * XXX: However OSCI LAN needs it
+ */
+ write_aux_reg(AUX_IRQ_ENABLE, 1);
+}
+
+static struct irq_chip arcv2_irq_chip = {
+ .name = "ARCv2 core Intc",
+ .irq_mask = arcv2_irq_mask,
+ .irq_unmask = arcv2_irq_unmask,
+ .irq_enable = arcv2_irq_enable
+};
+
+static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ /*
+ * core intc IRQs [16, 23]:
+ * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+ */
+ if (hw < FIRST_EXT_IRQ) {
+ /*
+ * A subsequent request_percpu_irq() fails if percpu_devid is
+ * not set. That in turn sets NOAUTOEN, meaning each core needs
+ * to call enable_percpu_irq()
+ */
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops arcv2_irq_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arcv2_irq_map,
+};
+
+
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
+{
+ struct irq_domain *root_domain;
+ struct bcr_irq_arcv2 irq_bcr;
+ unsigned int nr_cpu_irqs;
+
+ READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+ nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;
+
+ if (parent)
+ panic("DeviceTree incore intc not a root irq controller\n");
+
+ root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /*
+ * Needed for primary domain lookup to succeed
+ * This is a primary irqchip, and can never have a parent
+ */
+ irq_set_default_host(root_domain);
+
+#ifdef CONFIG_SMP
+ irq_create_mapping(root_domain, IPI_IRQ);
+#endif
+ irq_create_mapping(root_domain, SOFTIRQ_IRQ);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(arc_intc, "snps,archs-intc", init_onchip_IRQ);
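+
+/*
+ * Illustration (a sketch of a matching device tree node, as used by ARC HS
+ * platforms; node name and placement are examples only):
+ *
+ *	core_intc: interrupt-controller {
+ *		compatible = "snps,archs-intc";
+ *		interrupt-controller;
+ *		#interrupt-cells = <1>;
+ *	};
+ *
+ * IRQCHIP_DECLARE() above makes irqchip_init() invoke init_onchip_IRQ()
+ * when this compatible string is found as the root interrupt controller.
+ */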
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
new file mode 100644
index 0000000000..6885e42287
--- /dev/null
+++ b/arch/arc/kernel/intc-compact.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <asm/irq.h>
+
+#define NR_CPU_IRQS 32 /* number of irq lines coming in */
+#define TIMER0_IRQ 3 /* Fixed by ISA */
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Platform independent, needed for each CPU (not foldable into init_IRQ)
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ *
+ * What it does:
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void arc_init_IRQ(void)
+{
+ unsigned int level_mask = 0, i;
+
+ /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+ level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
+
+ /*
+ * Write to register, even if no LV2 IRQs configured to reset it
+ * in case bootloader had mucked with it
+ */
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+
+ if (level_mask)
+ pr_info("Level-2 interrupts bitset %x\n", level_mask);
+
+ /*
+ * Disable all IRQ lines so faulty external hardware won't
+ * trigger interrupt that kernel is not ready to handle.
+ */
+ for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << i);
+ write_aux_reg(AUX_IENABLE, ienb);
+ }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_irq_mask(struct irq_data *data)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << data->hwirq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void arc_irq_unmask(struct irq_data *data)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << data->hwirq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static struct irq_chip onchip_intc = {
+ .name = "ARC In-core Intc",
+ .irq_mask = arc_irq_mask,
+ .irq_unmask = arc_irq_unmask,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ switch (hw) {
+ case TIMER0_IRQ:
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+ break;
+ default:
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+ }
+ return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arc_intc_domain_map,
+};
+
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
+{
+ struct irq_domain *root_domain;
+
+ if (parent)
+ panic("DeviceTree incore intc not a root irq controller\n");
+
+ root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
+ &arc_intc_domain_ops, NULL);
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /*
+ * Needed for primary domain lookup to succeed
+ * This is a primary irqchip, and can never have a parent
+ */
+ irq_set_default_host(root_domain);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ * which may be in a hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ * e.g. suppose TIMER is high priority (Level 2) IRQ
+ * In the timer hard-ISR, timer_interrupt() calls spin_unlock_irq several times.
+ * Here local_irq_enable() should not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ * soft ISR are low priority jobs which can be very slow, thus all IRQs
+ * must be enabled while they run.
+ * Now hardware context wise we may still be in L2 ISR (not done rtie)
+ * still we must re-enable both L1 and L2 IRQs
+ * Another twist is prev scenario with flow being
+ * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
+ * here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
+ * over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ if (flags & STATUS_A2_MASK)
+ flags |= STATUS_E2_MASK;
+ else if (flags & STATUS_A1_MASK)
+ flags |= STATUS_E1_MASK;
+
+ arch_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(arch_local_irq_enable);
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 0000000000..dd09b58ff8
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <asm/mach_desc.h>
+
+#include <asm/irq_regs.h>
+#include <asm/smp.h>
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+ /*
+ * process the entire interrupt tree in one go
+ * Any external intc will be setup provided DT chains them
+ * properly
+ */
+ irqchip_init();
+
+#ifdef CONFIG_SMP
+ /* an SMP H/w block could do IPI IRQ request here */
+ if (plat_smp_ops.init_per_cpu)
+ plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
+
+ if (machine_desc->init_per_cpu)
+ machine_desc->init_per_cpu(smp_processor_id());
+}
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ generic_handle_domain_irq(NULL, hwirq);
+ set_irq_regs(old_regs);
+ irq_exit();
+}
diff --git a/arch/arc/kernel/jump_label.c b/arch/arc/kernel/jump_label.c
new file mode 100644
index 0000000000..70b74a5d04
--- /dev/null
+++ b/arch/arc/kernel/jump_label.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+
+#include "asm/cacheflush.h"
+
+#define JUMPLABEL_ERR "ARC: jump_label: ERROR: "
+
+/* Halt system on fatal error to make debug easier */
+#define arc_jl_fatal(format...) \
+({ \
+ pr_err(JUMPLABEL_ERR format); \
+ BUG(); \
+})
+
+static inline u32 arc_gen_nop(void)
+{
+ /* 1x 32bit NOP in middle endian */
+ return 0x7000264a;
+}
+
+/*
+ * Atomic update of patched instruction is only available if this
+ * instruction doesn't cross L1 cache line boundary. You can read about
+ * the way we achieve this in arc/include/asm/jump_label.h
+ */
+static inline void instruction_align_assert(void *addr, int len)
+{
+ unsigned long a = (unsigned long)addr;
+
+ if ((a >> L1_CACHE_SHIFT) != ((a + len - 1) >> L1_CACHE_SHIFT))
+ arc_jl_fatal("instruction (addr %px) cross L1 cache line border",
+ addr);
+}
+
+/*
+ * ARCv2 'Branch unconditionally' instruction:
+ * 00000ssssssssss1SSSSSSSSSSNRtttt
+ * s S[n:0] lower bits signed immediate (number is bitfield size)
+ * S S[m:n+1] upper bits signed immediate (number is bitfield size)
+ * t S[24:21] upper bits signed immediate (branch unconditionally far)
+ * N N <.d> delay slot mode
+ * R R Reserved
+ */
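+/*
+ * Worked example (first positive row of the self-test data at the bottom of
+ * this file): pc = 0x90007514, target = 0x9000752c
+ *	pcl      = pc & ~3      = 0x90007514
+ *	u_offset = target - pcl = 0x18 (+24)
+ *	s = (0x18 >> 1) & 0x3ff = 0xc, S = 0, t = 0
+ *	instruction_l = (s << 1) | 1 = 0x19
+ *	instruction_r = (S << 6) | t = 0x0
+ *	encoded instruction          = 0x00000019
+ */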
+static inline u32 arc_gen_branch(jump_label_t pc, jump_label_t target)
+{
+ u32 instruction_l, instruction_r;
+ u32 pcl = pc & GENMASK(31, 2);
+ u32 u_offset = target - pcl;
+ u32 s, S, t;
+
+ /*
+ * Offset in a 32-bit branch instruction must fit into s25.
+ * Something is terribly broken if we get such a huge offset within one
+ * function.
+ */
+ if ((s32)u_offset < -16777216 || (s32)u_offset > 16777214)
+ arc_jl_fatal("gen branch with offset (%d) not fit in s25",
+ (s32)u_offset);
+
+ /*
+ * All instructions are aligned to 2 bytes, so we should never get an offset
+ * here that is not 2-byte aligned.
+ */
+ if (u_offset & 0x1)
+ arc_jl_fatal("gen branch with offset (%d) unaligned to 2 bytes",
+ (s32)u_offset);
+
+ s = (u_offset >> 1) & GENMASK(9, 0);
+ S = (u_offset >> 11) & GENMASK(9, 0);
+ t = (u_offset >> 21) & GENMASK(3, 0);
+
+ /* 00000ssssssssss1 */
+ instruction_l = (s << 1) | 0x1;
+ /* SSSSSSSSSSNRtttt */
+ instruction_r = (S << 6) | t;
+
+ return (instruction_r << 16) | (instruction_l & GENMASK(15, 0));
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ jump_label_t *instr_addr = (jump_label_t *)entry->code;
+ u32 instr;
+
+ instruction_align_assert(instr_addr, JUMP_LABEL_NOP_SIZE);
+
+ if (type == JUMP_LABEL_JMP)
+ instr = arc_gen_branch(entry->code, entry->target);
+ else
+ instr = arc_gen_nop();
+
+ WRITE_ONCE(*instr_addr, instr);
+ flush_icache_range(entry->code, entry->code + JUMP_LABEL_NOP_SIZE);
+}
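+
+/*
+ * Illustration (a sketch using the generic static-key API, which is what
+ * ends up calling arch_jump_label_transform() above; my_key and
+ * do_rare_thing() are hypothetical):
+ *
+ *	DEFINE_STATIC_KEY_FALSE(my_key);
+ *
+ *	if (static_branch_unlikely(&my_key))	// site patched nop <-> branch
+ *		do_rare_thing();
+ *
+ *	static_branch_enable(&my_key);	// rewrites the site to a taken branch
+ */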
+
+#ifdef CONFIG_ARC_DBG_JUMP_LABEL
+#define SELFTEST_MSG "ARC: instruction generation self-test: "
+
+struct arc_gen_branch_testdata {
+ jump_label_t pc;
+ jump_label_t target_address;
+ u32 expected_instr;
+};
+
+static __init int branch_gen_test(const struct arc_gen_branch_testdata *test)
+{
+ u32 instr_got;
+
+ instr_got = arc_gen_branch(test->pc, test->target_address);
+ if (instr_got == test->expected_instr)
+ return 0;
+
+ pr_err(SELFTEST_MSG "FAIL:\n arc_gen_branch(0x%08x, 0x%08x) != 0x%08x, got 0x%08x\n",
+ test->pc, test->target_address,
+ test->expected_instr, instr_got);
+
+ return -EFAULT;
+}
+
+/*
+ * Offset field in branch instruction is not continuous. Test all
+ * available offset field and sign combinations. Test data is generated
+ * from real working code.
+ */
+static const struct arc_gen_branch_testdata arcgenbr_test_data[] __initconst = {
+ {0x90007548, 0x90007514, 0xffcf07cd}, /* tiny (-52) offs */
+ {0x9000c9c0, 0x9000c782, 0xffcf05c3}, /* tiny (-574) offs */
+ {0x9000cc1c, 0x9000c782, 0xffcf0367}, /* tiny (-1178) offs */
+ {0x9009dce0, 0x9009d106, 0xff8f0427}, /* small (-3034) offs */
+ {0x9000f5de, 0x90007d30, 0xfc0f0755}, /* big (-30892) offs */
+ {0x900a2444, 0x90035f64, 0xc9cf0321}, /* huge (-443616) offs */
+ {0x90007514, 0x9000752c, 0x00000019}, /* tiny (+24) offs */
+ {0x9001a578, 0x9001a77a, 0x00000203}, /* tiny (+514) offs */
+ {0x90031ed8, 0x90032634, 0x0000075d}, /* tiny (+1884) offs */
+ {0x9008c7f2, 0x9008d3f0, 0x00400401}, /* small (+3072) offs */
+ {0x9000bb38, 0x9003b340, 0x17c00009}, /* big (+194568) offs */
+ {0x90008f44, 0x90578d80, 0xb7c2063d} /* huge (+5701180) offs */
+};
+
+static __init int instr_gen_test(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arcgenbr_test_data); i++)
+ if (branch_gen_test(&arcgenbr_test_data[i]))
+ return -EFAULT;
+
+ pr_info(SELFTEST_MSG "OK\n");
+
+ return 0;
+}
+early_initcall(instr_gen_test);
+
+#endif /* CONFIG_ARC_DBG_JUMP_LABEL */
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 0000000000..4f2b595145
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/kgdb.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+ for (regno = 27; regno < GDB_MAX_REGS; regno++)
+ gdb_regs[regno] = 0;
+
+ gdb_regs[_FP] = kernel_regs->fp;
+ gdb_regs[__SP] = kernel_regs->sp;
+ gdb_regs[_BLINK] = kernel_regs->blink;
+ gdb_regs[_RET] = kernel_regs->ret;
+ gdb_regs[_STATUS32] = kernel_regs->status32;
+ gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
+ gdb_regs[_LP_END] = kernel_regs->lp_end;
+ gdb_regs[_LP_START] = kernel_regs->lp_start;
+ gdb_regs[_BTA] = kernel_regs->bta;
+ gdb_regs[_STOP_PC] = kernel_regs->ret;
+}
+
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+ kernel_regs->fp = gdb_regs[_FP];
+ kernel_regs->sp = gdb_regs[__SP];
+ kernel_regs->blink = gdb_regs[_BLINK];
+ kernel_regs->ret = gdb_regs[_RET];
+ kernel_regs->status32 = gdb_regs[_STATUS32];
+ kernel_regs->lp_count = gdb_regs[_LP_COUNT];
+ kernel_regs->lp_end = gdb_regs[_LP_END];
+ kernel_regs->lp_start = gdb_regs[_LP_START];
+ kernel_regs->bta = gdb_regs[_BTA];
+}
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ if (task)
+ to_gdb_regs(gdb_regs, task_pt_regs(task),
+ (struct callee_regs *) task->thread.callee_reg);
+}
+
+struct single_step_data_t {
+ uint16_t opcode[2];
+ unsigned long address[2];
+ int is_branch;
+ int armed;
+} single_step_data;
+
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (single_step_data.armed) {
+ int i;
+
+ for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+ memcpy((void *) single_step_data.address[i],
+ &single_step_data.opcode[i],
+ BREAK_INSTR_SIZE);
+
+ flush_icache_range(single_step_data.address[i],
+ single_step_data.address[i] +
+ BREAK_INSTR_SIZE);
+ }
+ single_step_data.armed = 0;
+ }
+}
+
+static void place_trap(unsigned long address, void *save)
+{
+ memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+ memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+static void do_single_step(struct pt_regs *regs)
+{
+ single_step_data.is_branch = disasm_next_pc((unsigned long)
+ regs->ret, regs, (struct callee_regs *)
+ current->thread.callee_reg,
+ &single_step_data.address[0],
+ &single_step_data.address[1]);
+
+ place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+ if (single_step_data.is_branch) {
+ place_trap(single_step_data.address[1],
+ &single_step_data.opcode[1]);
+ }
+
+ single_step_data.armed++;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ undo_single_step(regs);
+
+ switch (remcomInBuffer[0]) {
+ case 's':
+ case 'c':
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ret = addr;
+ fallthrough;
+
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(regs);
+ atomic_set(&kgdb_cpu_doing_single_step,
+ smp_processor_id());
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
+int kgdb_arch_init(void)
+{
+ single_step_data.armed = 0;
+ return 0;
+}
+
+void kgdb_trap(struct pt_regs *regs)
+{
+ /* trap_s 3 is used for breakpoints that overwrite existing
+ * instructions, while trap_s 4 is used for compiled breakpoints.
+ *
+ * with trap_s 3 breakpoints the original instruction needs to be
+ * restored and continuation needs to start at the location of the
+ * breakpoint.
+ *
+ * with trap_s 4 (compiled) breakpoints, continuation needs to
+ * start after the breakpoint.
+ */
+ if (regs->ecr.param == 3)
+ instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ instruction_pointer(regs) = ip;
+}
+
+void kgdb_call_nmi_hook(void *ignored)
+{
+ /* Default implementation passes get_irq_regs() but we don't */
+ kgdb_nmicallback(raw_smp_processor_id(), NULL);
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+ /* breakpoint instruction: TRAP_S 0x3 */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = {0x78, 0x7e},
+#else
+ .gdb_bpt_instr = {0x7e, 0x78},
+#endif
+};
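+
+/*
+ * Illustration (a sketch, not part of this file): with CONFIG_KGDB this
+ * backend is typically driven over a serial console, e.g.
+ *
+ *	boot with:        kgdboc=ttyS0,115200
+ *	break into kgdb:  echo g > /proc/sysrq-trigger
+ *
+ * after which a host side gdb can "target remote" the serial line and use
+ * the trap_s based breakpoints and single stepping set up above.
+ */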
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 0000000000..e71d64119d
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
+#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Attempt to probe at unaligned address */
+ if ((unsigned long)p->addr & 0x01)
+ return -EINVAL;
+
+ /* Address should not be in exception handling code */
+
+ p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+ p->opcode = *p->addr;
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = UNIMP_S_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ arch_disarm_kprobe(p);
+
+ /* Can we remove the kprobe in the middle of kprobe handling? */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+ struct pt_regs *regs)
+{
+ /* Remove the trap instructions inserted for single step and
+ * restore the original instructions
+ */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+
+ return;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long next_pc;
+ unsigned long tgt_if_br = 0;
+ int is_branch;
+ unsigned long bta;
+
+ /* Copy the opcode back to the kprobe location and execute the
+ * instruction. Because of this we will not be able to get into the
+ * same kprobe until this kprobe is done
+ */
+ *(p->addr) = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+ /* Now we insert the trap at the next location after this instruction to
+ * single step. If it is a branch we insert the trap at possible branch
+ * targets
+ */
+
+ bta = regs->bta;
+
+ if (regs->status32 & 0x40) {
+ /* We are in a delay slot with the branch taken */
+
+ next_pc = bta & ~0x01;
+
+ if (!p->ainsn.is_short) {
+ if (bta & 0x01)
+ regs->blink += 2;
+ else {
+ /* Branch not taken */
+ next_pc += 2;
+
+ /* next pc is taken from bta after executing the
+ * delay slot instruction
+ */
+ regs->bta += 2;
+ }
+ }
+
+ is_branch = 0;
+ } else
+ is_branch =
+ disasm_next_pc((unsigned long)p->addr, regs,
+ (struct callee_regs *) current->thread.callee_reg,
+ &next_pc, &tgt_if_br);
+
+ p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+ p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+ *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ if (is_branch) {
+ p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+ p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+ *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+ }
+}
+
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)addr);
+
+ if (p) {
+		/*
+		 * We have re-entered the kprobe_handler: another kprobe was
+		 * hit while within the handler. Save the original kprobe and
+		 * single step the instruction of the new probe without
+		 * calling any user handlers, to avoid recursive kprobes.
+		 */
+ if (kprobe_running()) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+		/* If we have no pre-handler or it returned 0, we continue with
+		 * normal processing. If we have a pre-handler and it returned
+		 * non-zero - meaning the user handler set up the registers to
+		 * resume at a different instruction - we must skip the single
+		 * stepping.
+		 */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+
+ return 1;
+ }
+
+ /* no_kprobe: */
+ preempt_enable_no_resched();
+ return 0;
+}
+
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+ struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ resume_execution(cur, addr, regs);
+
+ /* Rearm the kprobe */
+ arch_arm_kprobe(cur);
+
+	/*
+	 * Returning from the trap instruction resumes at the next
+	 * instruction. Since resume_execution() restored the original
+	 * instruction, we must return to the same address and execute it
+	 */
+ regs->ret = addr;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+/*
+ * The fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This also applies to cases like user probes, where the probe is in user
+ * space and the handlers are in the kernel
+ */
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single stepped
+		 * caused the fault. We reset the current kprobe and let the
+		 * exception handler run as if it were a regular exception.
+		 * In our case it doesn't matter because the system will be
+		 * halted
+		 */
+ resume_execution(cur, (unsigned long)cur->addr, regs);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ preempt_enable_no_resched();
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We are here because the instructions in the pre/post handler
+ * caused the fault.
+ */
+
+ /*
+ * In case the user-specified fault handler returned zero,
+ * try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ unsigned long addr = args->err;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_IERR:
+ if (arc_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ case DIE_TRAP:
+ if (arc_post_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
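+/*
+ * Inert landing pad for kretprobes: arch_prepare_kretprobe() below diverts
+ * a function's return address (blink) here, and the kprobe registered on
+ * this address in arch_init_kprobes() fires when the probed function
+ * returns.
+ */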
+static void __used kretprobe_trampoline_holder(void)
+{
+ __asm__ __volatile__(".global __kretprobe_trampoline\n"
+ "__kretprobe_trampoline:\n"
+ "nop\n");
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&__kretprobe_trampoline;
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ regs->ret = __kretprobe_trampoline_handler(regs, NULL);
+
+ /* By returning a non zero value, we are telling the kprobe handler
+ * that we don't want the post_handler to run
+ */
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ /* Registering the trampoline code for the kret probe */
+ return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+{
+ notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
+}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
new file mode 100644
index 0000000000..55373ca0d2
--- /dev/null
+++ b/arch/arc/kernel/mcip.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/spinlock.h>
+#include <soc/arc/mcip.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/setup.h>
+
+static DEFINE_RAW_SPINLOCK(mcip_lock);
+
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
+/*
+ * Set mask to halt GFRC if any online core in the SMP cluster is halted.
+ * Only works for ARC HS v3.0+; on earlier versions it has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+ struct bcr_generic gfrc;
+ unsigned long flags;
+ u32 gfrc_halt_mask;
+
+ READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+ /*
+ * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+ * GFRC 0x3 version.
+ */
+ if (gfrc.ver < 0x3)
+ return;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ __mcip_cmd(CMD_GFRC_READ_CORE, 0);
+ gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+ gfrc_halt_mask |= BIT(cpu);
+ __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+ u32 mcip_mask = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	/*
+	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
+	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
+	 * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
+	 */
+ __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+ mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+ mcip_mask |= BIT(cpu);
+
+ __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+ /*
+ * Parameter specified halt cause:
+ * STATUS32[H]/actionpoint/breakpoint/self-halt
+ * We choose all of them (0xF).
+ */
+ __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_setup_per_cpu(int cpu)
+{
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+
+ smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+ /* Update GFRC halt mask as new CPU came online */
+ if (mp.gfrc)
+ mcip_update_gfrc_halt_mask(cpu);
+
+ /* Update MCIP debug mask as new CPU came online */
+ if (mp.dbg)
+ mcip_update_debug_halt_mask(cpu);
+}
+
+static void mcip_ipi_send(int cpu)
+{
+ unsigned long flags;
+ int ipi_was_pending;
+
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	/*
+	 * If the receiver already has a pending interrupt, elide sending this
+	 * one. Linux cross-core calling works well with concurrent IPIs
+	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one()
+	 */
+ __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+ ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+ if (!ipi_was_pending)
+ __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_ipi_clear(int irq)
+{
+ unsigned int cpu, c;
+ unsigned long flags;
+
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ /* Who sent the IPI */
+ __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
+
+ cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
+
+	/*
+	 * In rare cases, multiple concurrent IPIs sent to the same target can
+	 * be coalesced by MCIP into one asserted IRQ, so @cpu can be
+	 * "vectored" (multiple bits set) as opposed to the typical single bit
+	 */
+ do {
+ c = __ffs(cpu); /* 0,1,2,3 */
+ __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
+ cpu &= ~(1U << c);
+ } while (cpu);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_probe_n_setup(void)
+{
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+
+ sprintf(smp_cpuinfo_buf,
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+ mp.ver, mp.num_cores,
+ IS_AVAIL1(mp.ipi, "IPI "),
+ IS_AVAIL1(mp.idu, "IDU "),
+ IS_AVAIL1(mp.dbg, "DEBUG "),
+ IS_AVAIL1(mp.gfrc, "GFRC"));
+}
+
+struct plat_smp_ops plat_smp_ops = {
+ .info = smp_cpuinfo_buf,
+ .init_early_smp = mcip_probe_n_setup,
+ .init_per_cpu = mcip_setup_per_cpu,
+ .ipi_send = mcip_ipi_send,
+ .ipi_clear = mcip_ipi_clear,
+};
+
+#endif
+
+/***************************************************************************
+ * ARCv2 Interrupt Distribution Unit (IDU)
+ *
+ * Connects external "COMMON" IRQs to core intc, providing:
+ * -dynamic routing (IRQ affinity)
+ * -load balancing (Round Robin interrupt distribution)
+ * -1:N distribution
+ *
+ * It physically resides in the MCIP hw block
+ */
+
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+/*
+ * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
+ */
+static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
+{
+ __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
+}
+
+static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
+ bool set_distr, unsigned int distr)
+{
+ union {
+ unsigned int word;
+ struct {
+ unsigned int distr:2, pad:2, lvl:1, pad2:27;
+ };
+ } data;
+
+ data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
+ if (set_distr)
+ data.distr = distr;
+ if (set_lvl)
+ data.lvl = lvl;
+ __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
+}
+
+static void idu_irq_mask_raw(irq_hw_number_t hwirq)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask(struct irq_data *data)
+{
+ idu_irq_mask_raw(data->hwirq);
+}
+
+static void idu_irq_unmask(struct irq_data *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_ack(struct irq_data *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask_ack(struct irq_data *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+ __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static int
+idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned long flags;
+ cpumask_t online;
+ unsigned int destination_bits;
+ unsigned int distribution_mode;
+
+	/* error out if there is no online cpu in @cpumask */
+ if (!cpumask_and(&online, cpumask, cpu_online_mask))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ destination_bits = cpumask_bits(&online)[0];
+ idu_set_dest(data->hwirq, destination_bits);
+
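+	/* exactly one destination bit set (ffs == fls): deliver to that
+	 * fixed destination, otherwise distribute round-robin */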
+ if (ffs(destination_bits) == fls(destination_bits))
+ distribution_mode = IDU_M_DISTRI_DEST;
+ else
+ distribution_mode = IDU_M_DISTRI_RR;
+
+ idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int idu_irq_set_type(struct irq_data *data, u32 type)
+{
+ unsigned long flags;
+
+ /*
+ * ARCv2 IDU HW does not support inverse polarity, so these are the
+ * only interrupt types supported.
+ */
+ if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ idu_set_mode(data->hwirq, true,
+ type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
+ IDU_M_TRIG_LEVEL,
+ false, 0);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+ return 0;
+}
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in the IDU must be set manually
+	 * since in some cases the kernel will not call irq_set_affinity() by
+	 * itself:
+	 *   1. When the kernel is not configured with SMP support.
+	 *   2. When the kernel is configured with SMP support but the upper
+	 *      interrupt controllers do not support setting the affinity and
+	 *      cannot propagate it to the IDU.
+	 */
+ idu_irq_set_affinity(data, cpu_online_mask, false);
+ idu_irq_unmask(data);
+}
+
+static struct irq_chip idu_irq_chip = {
+ .name = "MCIP IDU Intc",
+ .irq_mask = idu_irq_mask,
+ .irq_unmask = idu_irq_unmask,
+ .irq_ack = idu_irq_ack,
+ .irq_mask_ack = idu_irq_mask_ack,
+ .irq_enable = idu_irq_enable,
+ .irq_set_type = idu_irq_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = idu_irq_set_affinity,
+#endif
+
+};
+
+static void idu_cascade_isr(struct irq_desc *desc)
+{
+ struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *core_chip = irq_desc_get_chip(desc);
+ irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
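+	/* common IRQ "i" of the IDU cascades in at core intc hwirq
+	 * FIRST_EXT_IRQ + i (24, 25, ...) */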
+ irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
+
+ chained_irq_enter(core_chip, desc);
+ generic_handle_domain_irq(idu_domain, idu_hwirq);
+ chained_irq_exit(core_chip, desc);
+}
+
+static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+ return 0;
+}
+
+static const struct irq_domain_ops idu_irq_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = idu_irq_map,
+};
+
+/*
+ * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
+ * [24, 23+C]: If C > 0 then "C" common IRQs
+ * [24+C, N]: Not statically assigned, private-per-core
+ */
+
+static int __init
+idu_of_init(struct device_node *intc, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ int nr_irqs;
+ int i, virq;
+ struct mcip_bcr mp;
+ struct mcip_idu_bcr idu_bcr;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+
+ if (!mp.idu)
+ panic("IDU not detected, but DeviceTree using it");
+
+ READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
+ nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
+
+ pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);
+
+ domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
+
+ /* Parent interrupts (core-intc) are already mapped */
+
+ for (i = 0; i < nr_irqs; i++) {
+ /* Mask all common interrupts by default */
+ idu_irq_mask_raw(i);
+
+		/*
+		 * Map the parent uplink IRQs (towards core intc) 24, 25, ...
+		 * This step has already been done, but we need it here to get
+		 * the parent virq and install the IDU handler as the first
+		 * level isr
+		 */
+ virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
+ BUG_ON(!virq);
+ irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
+ }
+
+ __mcip_cmd(CMD_IDU_ENABLE, 0);
+
+ return 0;
+}
+IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 0000000000..c90c279047
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
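+/*
+ * ARC instruction encoding is "middle-endian": a 32-bit literal is stored
+ * as two 16-bit halves, most significant half first, e.g. 0x12345678 is
+ * written out as the halfwords 0x1234, 0x5678.
+ */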
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+ *addr = (value & 0xffff0000) >> 16;
+ *(addr + 1) = (value & 0xffff);
+}
+
+/*
+ * This gets called before the relocation loop in the generic loader.
+ * Make a note of the section index of the unwinding section
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+#endif
+ mod->arch.secstr = secstr;
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (mod->arch.unw_info)
+ unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex, /* sec index for sym tbl */
+ unsigned int relsec, /* sec index for relo sec */
+ struct module *module)
+{
+ int i, n, relo_type;
+ Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym_entry, *sym_sec;
+ Elf32_Addr relocation, location, tgt_addr;
+ unsigned int tgtsec;
+
+ /*
+ * @relsec has relocations e.g. .rela.init.text
+ * @tgtsec is section to patch e.g. .init.text
+ */
+ tgtsec = sechdrs[relsec].sh_info;
+ tgt_addr = sechdrs[tgtsec].sh_addr;
+ sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+ pr_debug("\nSection to fixup %s @%x\n",
+ module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
+ pr_debug("=========================================================\n");
+ pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
+ pr_debug("=========================================================\n");
+
+ /* Loop thru entries in relocation section */
+ for (i = 0; i < n; i++) {
+ const char *s;
+
+ /* This is where to make the change */
+ location = tgt_addr + rel_entry[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+ relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+ if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+ s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+ } else {
+ s = strtab + sym_entry->st_name;
+ }
+
+ pr_debug(" %x\t%x\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation, s);
+
+		/* This assumes modules are built with -mlong-calls, so any
+		 * branches/jumps are absolute 32-bit jumps; global data
+		 * access is likewise absolute 32-bit.
+		 * Both of these are handled by the same relocation type
+		 */
+ relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+ if (likely(R_ARC_32_ME == relo_type)) /* ME ( S + A ) */
+ arc_write_me((unsigned short *)location, relocation);
+ else if (R_ARC_32 == relo_type) /* ( S + A ) */
+ *((Elf32_Addr *) location) = relocation;
+ else if (R_ARC_32_PCREL == relo_type) /* ( S + A ) - PDATA ) */
+ *((Elf32_Addr *) location) = relocation - location;
+ else
+ goto relo_err;
+
+ }
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ module->arch.unw_sec_idx = tgtsec;
+#endif
+
+ return 0;
+
+relo_err:
+ pr_err("%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+ return -ENOEXEC;
+
+}
+
+/* Just before lift off: after sections have been relocated, we add the
+ * dwarf section to the unwinder table pool.
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not yet been applied
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw;
+ int unwsec = mod->arch.unw_sec_idx;
+
+ if (unwsec) {
+ unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+ sechdrs[unwsec].sh_size);
+ mod->arch.unw_info = unw;
+ }
+#endif
+ return 0;
+}
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
new file mode 100644
index 0000000000..adff957962
--- /dev/null
+++ b/arch/arc/kernel/perf_event.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <asm/arcregs.h>
+#include <asm/stacktrace.h>
+
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN 9
+
+/*
+ * Some ARC pct quirks:
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ * STALLED_CYCLES_FRONTEND.
+ *
+ * We could start multiple performance counters and combine everything
+ * afterwards, but that makes it complicated.
+ *
+ * Note that I$ cache misses aren't counted by either of the two!
+ */
+
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build)
+ * Below is the static map between generic perf / ARC specific event_ids
+ * and h/w condition names.
+ * At probe time, we loop through each index and look up its name to
+ * complete the mapping of perf event_id to h/w index, as the latter is
+ * what is actually programmed into the counter
+ */
+static const char * const arc_pmu_ev_hw_map[] = {
+ /* count cycles */
+ [PERF_COUNT_HW_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_BUS_CYCLES] = "crun",
+
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
+#ifdef CONFIG_ISA_ARCV2
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+#else
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+#endif
+ [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
+
+ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
+ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
+ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
+ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
+ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
+
+ [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xffff
+
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
+ },
+	/* DTLB LD/ST Miss not segregated by h/w */
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+enum arc_pmu_attr_groups {
+ ARCPMU_ATTR_GR_EVENTS,
+ ARCPMU_ATTR_GR_FORMATS,
+ ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+ char name[ARCPMU_EVENT_NAME_LEN];
+};
+
+struct arc_pmu {
+ struct pmu pmu;
+ unsigned int irq;
+ int n_counters;
+ int n_events;
+ u64 max_period;
+ int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+ struct arc_pmu_raw_event_entry *raw_entry;
+ struct attribute **attrs;
+ struct perf_pmu_events_attr *attr;
+ const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
+};
+
+struct arc_pmu_cpu {
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];
+
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
+};
+
+struct arc_callchain_trace {
+ int depth;
+ void *perf_stuff;
+};
+
+static int callchain_trace(unsigned int addr, void *data)
+{
+ struct arc_callchain_trace *ctrl = data;
+ struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
+ perf_callchain_store(entry, addr);
+
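+	/* stop unwinding once 4 entries have been stored */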
+ if (ctrl->depth++ < 3)
+ return 0;
+
+ return -1;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct arc_callchain_trace ctrl = {
+ .depth = 0,
+ .perf_stuff = entry,
+ };
+
+ arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+	/*
+	 * The user stack can't be unwound trivially with the kernel dwarf
+	 * unwinder, so for now just record the user PC
+	 */
+ perf_callchain_store(entry, instruction_pointer(regs));
+}
+
+static struct arc_pmu *arc_pmu;
+static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
+
+/* read counter #idx; note that counter# != event# on ARC! */
+static u64 arc_pmu_read_counter(int idx)
+{
+ u32 tmp;
+ u64 result;
+
+ /*
+ * ARC supports making 'snapshots' of the counters, so we don't
+ * need to care about counters wrapping to 0 underneath our feet
+ */
+ write_aux_reg(ARC_REG_PCT_INDEX, idx);
+ tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
+ write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
+ result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+ result |= read_aux_reg(ARC_REG_PCT_SNAPL);
+
+ return result;
+}
+
+static void arc_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+ u64 new_raw_count = arc_pmu_read_counter(idx);
+ s64 delta = new_raw_count - prev_raw_count;
+
+	/*
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
+	 * because there's no way for this function to be re-entered.
+	 */
+ local64_set(&hwc->prev_count, new_raw_count);
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void arc_pmu_read(struct perf_event *event)
+{
+ arc_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int arc_pmu_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ int ret;
+
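+	/* perf packs cache events as: config = type | (op << 8) | (result << 16) */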
+ cache_type = (config >> 0) & 0xff;
+ cache_op = (config >> 8) & 0xff;
+ cache_result = (config >> 16) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
+
+ if (ret == CACHE_OP_UNSUPPORTED)
+ return -ENOENT;
+
+ pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
+ cache_type, cache_op, cache_result, ret,
+ arc_pmu_ev_hw_map[ret]);
+
+ return ret;
+}
+
+/* initializes hw_perf_event structure if event is supported */
+static int arc_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int ret;
+
+ if (!is_sampling_event(event)) {
+ hwc->sample_period = arc_pmu->max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ hwc->config = 0;
+
+ if (is_isa_arcv2()) {
+ /* "exclude user" means "count only kernel" */
+ if (event->attr.exclude_user)
+ hwc->config |= ARC_REG_PCT_CONFIG_KERN;
+
+ /* "exclude kernel" means "count only user" */
+ if (event->attr.exclude_kernel)
+ hwc->config |= ARC_REG_PCT_CONFIG_USER;
+ }
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -ENOENT;
+ if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
+ return -ENOENT;
+ hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
+ pr_debug("init event %d with h/w %08x \'%s\'\n",
+ (int)event->attr.config, (int)hwc->config,
+ arc_pmu_ev_hw_map[event->attr.config]);
+ return 0;
+
+ case PERF_TYPE_HW_CACHE:
+ ret = arc_pmu_cache_event(event->attr.config);
+ if (ret < 0)
+ return ret;
+ hwc->config |= arc_pmu->ev_hw_idx[ret];
+ pr_debug("init cache event with h/w %08x \'%s\'\n",
+ (int)hwc->config, arc_pmu_ev_hw_map[ret]);
+ return 0;
+
+ case PERF_TYPE_RAW:
+ if (event->attr.config >= arc_pmu->n_events)
+ return -ENOENT;
+
+ hwc->config |= event->attr.config;
+ pr_debug("init raw event with idx %lld \'%s\'\n",
+ event->attr.config,
+ arc_pmu->raw_entry[event->attr.config].name);
+
+ return 0;
+
+ default:
+ return -ENOENT;
+ }
+}
+
+/* starts all counters */
+static void arc_pmu_enable(struct pmu *pmu)
+{
+ u32 tmp;
+ tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
+ write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
+}
+
+/* stops all counters */
+static void arc_pmu_disable(struct pmu *pmu)
+{
+ u32 tmp;
+ tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
+ write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
+}
+
+static int arc_pmu_event_set_period(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int idx = hwc->idx;
+ int overflow = 0;
+ u64 value;
+
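+	/* period_left counts down; a non-positive value means the period elapsed */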
+ if (unlikely(left <= -period)) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ overflow = 1;
+ } else if (unlikely(left <= 0)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ overflow = 1;
+ }
+
+ if (left > arc_pmu->max_period)
+ left = arc_pmu->max_period;
+
+ value = arc_pmu->max_period - left;
+ local64_set(&hwc->prev_count, value);
+
+ /* Select counter */
+ write_aux_reg(ARC_REG_PCT_INDEX, idx);
+
+ /* Write value */
+ write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+ write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
+
+ perf_event_update_userpage(event);
+
+ return overflow;
+}
+
+/*
+ * Assigns hardware counter to hardware condition.
+ * Note that there is no separate start/stop mechanism;
+ * stopping is achieved by assigning the 'never' condition
+ */
+static void arc_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ arc_pmu_event_set_period(event);
+
+ /* Enable interrupt for this counter */
+ if (is_sampling_event(event))
+ write_aux_reg(ARC_REG_PCT_INT_CTRL,
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
+
+ /* enable ARC pmu here */
+ write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
+ write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); /* condition */
+}
+
+static void arc_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /* Disable interrupt for this counter */
+ if (is_sampling_event(event)) {
+		/*
+		 * Reset the interrupt flag by writing 1. This is required to
+		 * make sure no pending interrupt is left behind.
+		 */
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
+ write_aux_reg(ARC_REG_PCT_INT_CTRL,
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
+ }
+
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ /* stop hw counter here */
+ write_aux_reg(ARC_REG_PCT_INDEX, idx);
+
+ /* condition code #0 is always "never" */
+ write_aux_reg(ARC_REG_PCT_CONFIG, 0);
+
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) &&
+ !(event->hw.state & PERF_HES_UPTODATE)) {
+ arc_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void arc_pmu_del(struct perf_event *event, int flags)
+{
+ struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+
+ arc_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, pmu_cpu->used_mask);
+
+ pmu_cpu->act_counter[event->hw.idx] = 0;
+
+ perf_event_update_userpage(event);
+}
+
+/* allocate hardware counter and optionally start counting */
+static int arc_pmu_add(struct perf_event *event, int flags)
+{
+ struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
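+	/* find the first free counter in the used_mask bitmap */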
+ idx = ffz(pmu_cpu->used_mask[0]);
+ if (idx == arc_pmu->n_counters)
+ return -EAGAIN;
+
+ __set_bit(idx, pmu_cpu->used_mask);
+ hwc->idx = idx;
+
+ write_aux_reg(ARC_REG_PCT_INDEX, idx);
+
+ pmu_cpu->act_counter[idx] = event;
+
+ if (is_sampling_event(event)) {
+ /* Mimic full counter overflow as other arches do */
+ write_aux_reg(ARC_REG_PCT_INT_CNTL,
+ lower_32_bits(arc_pmu->max_period));
+ write_aux_reg(ARC_REG_PCT_INT_CNTH,
+ upper_32_bits(arc_pmu->max_period));
+ }
+
+ write_aux_reg(ARC_REG_PCT_CONFIG, 0);
+ write_aux_reg(ARC_REG_PCT_COUNTL, 0);
+ write_aux_reg(ARC_REG_PCT_COUNTH, 0);
+ local64_set(&hwc->prev_count, 0);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ arc_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+#ifdef CONFIG_ISA_ARCV2
+static irqreturn_t arc_pmu_intr(int irq, void *dev)
+{
+ struct perf_sample_data data;
+ struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+ struct pt_regs *regs;
+ unsigned int active_ints;
+ int idx;
+
+ arc_pmu_disable(&arc_pmu->pmu);
+
+ active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+ if (!active_ints)
+ goto done;
+
+ regs = get_irq_regs();
+
+ do {
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+
+ idx = __ffs(active_ints);
+
+		/* Reset the interrupt flag by writing 1 */
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
+
+		/*
+		 * When the "interrupt active" bit is reset, the corresponding
+		 * "interrupt enable" bit gets automatically reset as well.
+		 * So we need to re-enable the interrupt for this counter.
+		 */
+ write_aux_reg(ARC_REG_PCT_INT_CTRL,
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
+
+ event = pmu_cpu->act_counter[idx];
+ hwc = &event->hw;
+
+ WARN_ON_ONCE(hwc->idx != idx);
+
+ arc_perf_event_update(event, &event->hw, event->hw.idx);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (arc_pmu_event_set_period(event)) {
+ if (perf_event_overflow(event, &data, regs))
+ arc_pmu_stop(event, 0);
+ }
+
+ active_ints &= ~BIT(idx);
+ } while (active_ints);
+
+done:
+ arc_pmu_enable(&arc_pmu->pmu);
+
+ return IRQ_HANDLED;
+}
+#else
+
+static irqreturn_t arc_pmu_intr(int irq, void *dev)
+{
+ return IRQ_NONE;
+}
+
+#endif /* CONFIG_ISA_ARCV2 */
+
+static void arc_cpu_pmu_irq_init(void *data)
+{
+ int irq = *(int *)data;
+
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+
+ /* Clear all pending interrupt flags */
+ write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+}
+
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+ .name = "format",
+ .attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have a pre-defined list of perf
+ * events. We generate and add attrs dynamically in probe() after we read
+ * the HW configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+ .name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+ memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+ arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+ arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+ arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+ arc_pmu->attr[j].id = j;
+ arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+ arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attr)
+ return -ENOMEM;
+
+ arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attrs)
+ return -ENOMEM;
+
+ arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+ sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->raw_entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+ if (!arc_pmu_ev_hw_map[i])
+ return false;
+
+ if (!strlen(arc_pmu_ev_hw_map[i]))
+ return false;
+
+ if (strcmp(arc_pmu_ev_hw_map[i], name))
+ return false;
+
+ return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+ int i;
+
+ /* See if HW condition has been mapped to a perf event_id */
+ for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+ if (event_in_hw_event_map(i, str)) {
+ pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+ i, str, j);
+ arc_pmu->ev_hw_idx[i] = j;
+ }
+ }
+}
+
+static int arc_pmu_device_probe(struct platform_device *pdev)
+{
+ struct arc_reg_pct_build pct_bcr;
+ struct arc_reg_cc_build cc_bcr;
+ int i, has_interrupts, irq = -1;
+ int counter_size; /* in bits */
+
+ union cc_name {
+ struct {
+ u32 word0, word1;
+ char sentinel;
+ } indiv;
+ char str[ARCPMU_EVENT_NAME_LEN];
+ } cc_name;
+
+ READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
+ if (!pct_bcr.v) {
+ pr_err("This core does not have performance counters!\n");
+ return -ENODEV;
+ }
+ BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
+ if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+ return -EINVAL;
+
+ READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
+ if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+ return -EINVAL;
+
+ arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
+ if (!arc_pmu)
+ return -ENOMEM;
+
+ arc_pmu->n_events = cc_bcr.c;
+
+ if (arc_pmu_raw_alloc(&pdev->dev))
+ return -ENOMEM;
+
+ has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
+
+ arc_pmu->n_counters = pct_bcr.c;
+ counter_size = 32 + (pct_bcr.s << 4);
+
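+	/* pct_bcr.s encodes counter width: s=0 -> 32 bit, s=1 -> 48 bit,
+	 * s=2 -> 64 bit; max_period is half the counter range, e.g.
+	 * 2^31 - 1 for 32-bit counters */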
+ arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
+
+ pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
+ arc_pmu->n_counters, counter_size, cc_bcr.c,
+ has_interrupts ? ", [overflow IRQ support]" : "");
+
+ cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
+ for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
+ arc_pmu->ev_hw_idx[i] = -1;
+
+ /* loop thru all available h/w condition indexes */
+ for (i = 0; i < cc_bcr.c; i++) {
+ write_aux_reg(ARC_REG_CC_INDEX, i);
+ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
+
+ arc_pmu_map_hw_event(i, cc_name.str);
+ arc_pmu_add_raw_event_attr(i, cc_name.str);
+ }
+
+ arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
+ arc_pmu->pmu = (struct pmu) {
+ .pmu_enable = arc_pmu_enable,
+ .pmu_disable = arc_pmu_disable,
+ .event_init = arc_pmu_event_init,
+ .add = arc_pmu_add,
+ .del = arc_pmu_del,
+ .start = arc_pmu_start,
+ .stop = arc_pmu_stop,
+ .read = arc_pmu_read,
+ .attr_groups = arc_pmu->attr_groups,
+ };
+
+ if (has_interrupts) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ int ret;
+
+ arc_pmu->irq = irq;
+
+ /* intc map function ensures irq_set_percpu_devid() called */
+ ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+ this_cpu_ptr(&arc_pmu_cpu));
+
+ if (!ret)
+ on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+ else
+ irq = -1;
+ }
+
+ }
+
+ if (irq == -1)
+ arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	/*
+	 * The perf parser doesn't really like the '-' symbol in event names,
+	 * so use '_' in the arc pct name, as it becomes the kernel PMU event
+	 * prefix.
+	 */
+ return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
+}
+
+static const struct of_device_id arc_pmu_match[] = {
+ { .compatible = "snps,arc700-pct" },
+ { .compatible = "snps,archs-pct" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arc_pmu_match);
+
+static struct platform_driver arc_pmu_driver = {
+ .driver = {
+ .name = "arc-pct",
+ .of_match_table = of_match_ptr(arc_pmu_match),
+ },
+ .probe = arc_pmu_device_probe,
+};
+
+module_platform_driver(arc_pmu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
+MODULE_DESCRIPTION("ARC PMU driver");
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 0000000000..186ceab661
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+#include <asm/fpu.h>
+
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+ task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+ return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as the sys-call return code.
+ * Ideally it should be a copy-to-user.
+ * However we can cheat, given the fact that some sys-calls do return
+ * absurdly high values.
+ * Since the tls data ptr is not going to be in the 0xFFFF_xxxx range
+ * it won't be considered a sys-call error,
+ * and it is loads better than copy-to-user, which is a definite
+ * D-TLB Miss
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+ return task_thread_info(current)->thr_ptr;
+}
+
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
+{
+ struct pt_regs *regs = current_pt_regs();
+ u32 uval;
+ int ret;
+
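+	/* returns the value read from *uaddr; STATUS32.Z tells userspace
+	 * whether the exchange actually happened */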
+	/*
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
+	 * can't possibly be SMP, so this doesn't need to be SMP safe.
+	 * That also helps reduce the serialization overhead in the UP case
+	 */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+ /* Z indicates to userspace if operation succeeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
+ ret = access_ok(uaddr, sizeof(*uaddr));
+ if (!ret)
+ goto fail;
+
+again:
+ preempt_disable();
+
+ ret = __get_user(uval, uaddr);
+ if (ret)
+ goto fault;
+
+ if (uval != expected)
+ goto out;
+
+ ret = __put_user(new, uaddr);
+ if (ret)
+ goto fault;
+
+ regs->status32 |= STATUS_Z_MASK;
+
+out:
+ preempt_enable();
+ return uval;
+
+fault:
+ preempt_enable();
+
+ if (unlikely(ret != -EFAULT))
+ goto fail;
+
+ mmap_read_lock(current->mm);
+ ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
+ FAULT_FLAG_WRITE, NULL);
+ mmap_read_unlock(current->mm);
+
+ if (likely(!ret))
+ goto again;
+
+fail:
+ force_sig(SIGSEGV);
+ return ret;
+}
+
+#ifdef CONFIG_ISA_ARCV2
+
+void arch_cpu_idle(void)
+{
+ /* Re-enable interrupts <= default irq priority before committing SLEEP */
+ const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
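+	/* per the comment above: 0x10 in the SLEEP operand re-enables
+	 * interrupts while sleeping, the low bits give the priority limit */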
+
+ __asm__ __volatile__(
+ "sleep %0 \n"
+ :
+ :"I"(arg)); /* can't be "r" has to be embedded const */
+
+ raw_local_irq_disable();
+}
+
+#else /* ARC700 */
+
+void arch_cpu_idle(void)
+{
+	/* sleep, but enable both E1/E2 interrupt levels before committing */
+ __asm__ __volatile__("sleep 0x3 \n");
+ raw_local_irq_disable();
+}
+
+#endif
+
+asmlinkage void ret_from_fork(void);
+
+/*
+ * Copy architecture-specific thread state
+ *
+ * Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * | ... |
+ * | ... |
+ * | unused |
+ * | |
+ * ------------------
+ * | r25 | <==== top of Stack (thread_info.ksp)
+ * ~ ~
+ * | --to-- | (CALLEE Regs of kernel mode)
+ * | r13 |
+ * ------------------
+ * | fp |
+ * | blink | @ret_from_fork
+ * ------------------
+ * | |
+ * ~ ~
+ * ~ ~
+ * | |
+ * ------------------
+ * | r12 |
+ * ~ ~
+ * | --to-- | (scratch Regs of user mode)
+ * | r0 |
+ * ------------------
+ * | SP |
+ * | orig_r0 |
+ * | event/ECR |
+ * ------------------ <===== END of PAGE
+ */
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
+ struct pt_regs *c_regs; /* child's pt_regs */
+ unsigned long *childksp; /* to unwind out of __switch_to() */
+ struct callee_regs *c_callee; /* child's callee regs */
+	struct callee_regs *parent_callee;	/* parent's callee regs */
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Mark the specific anchors to begin with (see pic above) */
+ c_regs = task_pt_regs(p);
+ childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
+ c_callee = ((struct callee_regs *)childksp) - 1;
+
+	/*
+	 * __switch_to() uses thread_info.ksp to start unwinding the stack.
+	 * For kernel threads we don't need to create callee regs, but the
+	 * stack layout nevertheless needs to remain the same.
+	 * Also, since __switch_to() unwinds callee regs anyway, we use
+	 * this to populate the kernel thread entry-pt/args into callee regs,
+	 * so that ret_from_kernel_thread() becomes simpler.
+	 */
+ task_thread_info(p)->ksp = (unsigned long)c_callee; /* THREAD_INFO_KSP */
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top */
+ childksp[0] = 0; /* fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+ if (unlikely(args->fn)) {
+ memset(c_regs, 0, sizeof(struct pt_regs));
+
+ c_callee->r13 = (unsigned long)args->fn_arg;
+ c_callee->r14 = (unsigned long)args->fn;
+
+ return 0;
+ }
+
+ /*--------- User Task Only --------------*/
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+ childksp[0] = 0; /* for POP fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+ /* Copy parents pt regs on child's kernel mode stack */
+ *c_regs = *regs;
+
+ if (usp)
+ c_regs->sp = usp;
+
+ c_regs->r0 = 0; /* fork returns 0 in child */
+
+ parent_callee = ((struct callee_regs *)regs) - 1;
+ *c_callee = *parent_callee;
+
+ if (unlikely(clone_flags & CLONE_SETTLS)) {
+		/*
+		 * set task's userland tls data ptr from the 4th arg;
+		 * the clone C-lib call is different from the clone sys-call
+		 */
+ task_thread_info(p)->thr_ptr = tls;
+ } else {
+ /* Normal fork case: set parent's TLS ptr in child */
+ task_thread_info(p)->thr_ptr =
+ task_thread_info(current)->thr_ptr;
+ }
+
+	/*
+	 * setup usermode thread pointer #1:
+	 * when the child is picked by the scheduler, __switch_to() uses
+	 * @c_callee to populate usermode callee regs: this works (despite
+	 * being in a kernel function) since the special return path for the
+	 * child, @ret_from_fork(), ensures those regs are not clobbered all
+	 * the way to the RTIE to usermode
+	 */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+ return 0;
+}
+
+/*
+ * Do necessary setup to start up a new user task
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
+{
+ regs->sp = usp;
+ regs->ret = pc;
+
+ /*
+ * [U]ser Mode bit set
+ * [L] ZOL loop inhibited to begin with - cleared by a LP insn
+ * Interrupts enabled
+ */
+ regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
+
+ fpu_init_task(regs);
+
+ /* bogus seed values for debugging */
+ regs->lp_start = 0x10;
+ regs->lp_end = 0x80;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ if (x->e_machine != EM_ARC_INUSE) {
+ pr_err("ELF not built for %s ISA\n",
+ is_isa_arcompact() ? "ARCompact":"ARCv2");
+ return 0;
+ }
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
+ pr_err("ABI mismatch - you need newer toolchain\n");
+ force_fatal_sig(SIGSEGV);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 0000000000..e0c233c178
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/sched/task_stack.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+#ifdef CONFIG_ISA_ARCOMPACT
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(bta),
+ REG_OFFSET_NAME(lp_start),
+ REG_OFFSET_NAME(lp_end),
+ REG_OFFSET_NAME(lp_count),
+ REG_OFFSET_NAME(status32),
+ REG_OFFSET_NAME(ret),
+ REG_OFFSET_NAME(blink),
+ REG_OFFSET_NAME(fp),
+ REG_OFFSET_NAME(r26),
+ REG_OFFSET_NAME(r12),
+ REG_OFFSET_NAME(r11),
+ REG_OFFSET_NAME(r10),
+ REG_OFFSET_NAME(r9),
+ REG_OFFSET_NAME(r8),
+ REG_OFFSET_NAME(r7),
+ REG_OFFSET_NAME(r6),
+ REG_OFFSET_NAME(r5),
+ REG_OFFSET_NAME(r4),
+ REG_OFFSET_NAME(r3),
+ REG_OFFSET_NAME(r2),
+ REG_OFFSET_NAME(r1),
+ REG_OFFSET_NAME(r0),
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(orig_r0),
+ REG_OFFSET_NAME(ecr),
+ REG_OFFSET_END,
+};
+
+#else
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(orig_r0),
+ REG_OFFSET_NAME(ecr),
+ REG_OFFSET_NAME(bta),
+ REG_OFFSET_NAME(r26),
+ REG_OFFSET_NAME(fp),
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(r12),
+ REG_OFFSET_NAME(r30),
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ REG_OFFSET_NAME(r58),
+ REG_OFFSET_NAME(r59),
+#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ REG_OFFSET_NAME(DSP_CTRL),
+#endif
+ REG_OFFSET_NAME(r0),
+ REG_OFFSET_NAME(r1),
+ REG_OFFSET_NAME(r2),
+ REG_OFFSET_NAME(r3),
+ REG_OFFSET_NAME(r4),
+ REG_OFFSET_NAME(r5),
+ REG_OFFSET_NAME(r6),
+ REG_OFFSET_NAME(r7),
+ REG_OFFSET_NAME(r8),
+ REG_OFFSET_NAME(r9),
+ REG_OFFSET_NAME(r10),
+ REG_OFFSET_NAME(r11),
+ REG_OFFSET_NAME(blink),
+ REG_OFFSET_NAME(lp_end),
+ REG_OFFSET_NAME(lp_start),
+ REG_OFFSET_NAME(lp_count),
+ REG_OFFSET_NAME(ei),
+ REG_OFFSET_NAME(ldi),
+ REG_OFFSET_NAME(jli),
+ REG_OFFSET_NAME(ret),
+ REG_OFFSET_NAME(status32),
+ REG_OFFSET_END,
+};
+#endif
+
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+ struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
+ return tmp;
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ unsigned int stop_pc_val;
+
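+	/* the store order below must mirror the layout of struct user_regs_struct */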
+ membuf_zero(&to, 4); // pad
+ membuf_store(&to, ptregs->bta);
+ membuf_store(&to, ptregs->lp_start);
+ membuf_store(&to, ptregs->lp_end);
+ membuf_store(&to, ptregs->lp_count);
+ membuf_store(&to, ptregs->status32);
+ membuf_store(&to, ptregs->ret);
+ membuf_store(&to, ptregs->blink);
+ membuf_store(&to, ptregs->fp);
+ membuf_store(&to, ptregs->r26); // gp
+ membuf_store(&to, ptregs->r12);
+ membuf_store(&to, ptregs->r11);
+ membuf_store(&to, ptregs->r10);
+ membuf_store(&to, ptregs->r9);
+ membuf_store(&to, ptregs->r8);
+ membuf_store(&to, ptregs->r7);
+ membuf_store(&to, ptregs->r6);
+ membuf_store(&to, ptregs->r5);
+ membuf_store(&to, ptregs->r4);
+ membuf_store(&to, ptregs->r3);
+ membuf_store(&to, ptregs->r2);
+ membuf_store(&to, ptregs->r1);
+ membuf_store(&to, ptregs->r0);
+ membuf_store(&to, ptregs->sp);
+ membuf_zero(&to, 4); // pad2
+ membuf_store(&to, cregs->r25);
+ membuf_store(&to, cregs->r24);
+ membuf_store(&to, cregs->r23);
+ membuf_store(&to, cregs->r22);
+ membuf_store(&to, cregs->r21);
+ membuf_store(&to, cregs->r20);
+ membuf_store(&to, cregs->r19);
+ membuf_store(&to, cregs->r18);
+ membuf_store(&to, cregs->r17);
+ membuf_store(&to, cregs->r16);
+ membuf_store(&to, cregs->r15);
+ membuf_store(&to, cregs->r14);
+ membuf_store(&to, cregs->r13);
+ membuf_store(&to, target->thread.fault_address); // efa
+
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
+ }
+
+ return membuf_store(&to, stop_pc_val); // stop_pc
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, FIRST), \
+ offsetof(struct user_regs_struct, NEXT));
+
+#define REG_IN_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+#define REG_IGNORE_ONE(LOC) \
+ if (!ret) \
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ REG_IGNORE_ONE(pad);
+
+ REG_IN_ONE(scratch.bta, &ptregs->bta);
+ REG_IN_ONE(scratch.lp_start, &ptregs->lp_start);
+ REG_IN_ONE(scratch.lp_end, &ptregs->lp_end);
+ REG_IN_ONE(scratch.lp_count, &ptregs->lp_count);
+
+ REG_IGNORE_ONE(scratch.status32);
+
+ REG_IN_ONE(scratch.ret, &ptregs->ret);
+ REG_IN_ONE(scratch.blink, &ptregs->blink);
+ REG_IN_ONE(scratch.fp, &ptregs->fp);
+ REG_IN_ONE(scratch.gp, &ptregs->r26);
+ REG_IN_ONE(scratch.r12, &ptregs->r12);
+ REG_IN_ONE(scratch.r11, &ptregs->r11);
+ REG_IN_ONE(scratch.r10, &ptregs->r10);
+ REG_IN_ONE(scratch.r9, &ptregs->r9);
+ REG_IN_ONE(scratch.r8, &ptregs->r8);
+ REG_IN_ONE(scratch.r7, &ptregs->r7);
+ REG_IN_ONE(scratch.r6, &ptregs->r6);
+ REG_IN_ONE(scratch.r5, &ptregs->r5);
+ REG_IN_ONE(scratch.r4, &ptregs->r4);
+ REG_IN_ONE(scratch.r3, &ptregs->r3);
+ REG_IN_ONE(scratch.r2, &ptregs->r2);
+ REG_IN_ONE(scratch.r1, &ptregs->r1);
+ REG_IN_ONE(scratch.r0, &ptregs->r0);
+ REG_IN_ONE(scratch.sp, &ptregs->sp);
+
+ REG_IGNORE_ONE(pad2);
+
+ REG_IN_ONE(callee.r25, &cregs->r25);
+ REG_IN_ONE(callee.r24, &cregs->r24);
+ REG_IN_ONE(callee.r23, &cregs->r23);
+ REG_IN_ONE(callee.r22, &cregs->r22);
+ REG_IN_ONE(callee.r21, &cregs->r21);
+ REG_IN_ONE(callee.r20, &cregs->r20);
+ REG_IN_ONE(callee.r19, &cregs->r19);
+ REG_IN_ONE(callee.r18, &cregs->r18);
+ REG_IN_ONE(callee.r17, &cregs->r17);
+ REG_IN_ONE(callee.r16, &cregs->r16);
+ REG_IN_ONE(callee.r15, &cregs->r15);
+ REG_IN_ONE(callee.r14, &cregs->r14);
+ REG_IN_ONE(callee.r13, &cregs->r13);
+
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
+
+ return ret;
+}
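+
+/*
+ * For context, a minimal userspace sketch (not part of this file; assumes
+ * <sys/ptrace.h>, <sys/uio.h> and <elf.h>) of how a tracer fetches this
+ * regset; the layout mirrors struct user_regs_struct:
+ *
+ *	struct user_regs_struct uregs;
+ *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
+ *
+ *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
+ *		printf("tracee stopped at PC %lx\n", uregs.stop_pc);
+ */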
+
+#ifdef CONFIG_ISA_ARCV2
+static int arcv2regs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ /*
+ * an itemized copy is not needed like above, as the layout of these regs
+ * (r30, r58, r59) is exactly the same in kernel (pt_regs) and userspace
+ * (user_regs_arcv2)
+ */
+ return membuf_write(&to, &regs->r30, sizeof(struct user_regs_arcv2));
+
+ membuf_write(&to, &regs->r30, 4); /* r30 only */
+ return membuf_zero(&to, sizeof(struct user_regs_arcv2) - 4);
+}
+
+static int arcv2regs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret, copy_sz;
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ copy_sz = sizeof(struct user_regs_arcv2);
+ else
+ copy_sz = 4; /* r30 only */
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, (void *)&regs->r30,
+ 0, copy_sz);
+
+ return ret;
+}
+
+#endif
+
+enum arc_getset {
+ REGSET_CMN,
+ REGSET_ARCV2,
+};
+
+static const struct user_regset arc_regsets[] = {
+ [REGSET_CMN] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .regset_get = genregs_get,
+ .set = genregs_set,
+ },
+#ifdef CONFIG_ISA_ARCV2
+ [REGSET_ARCV2] = {
+ .core_note_type = NT_ARC_V2,
+ .n = ELF_ARCV2REG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .regset_get = arcv2regs_get,
+ .set = arcv2regs_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_arc_view = {
+ .name = "arc",
+ .e_machine = EM_ARC_INUSE,
+ .regsets = arc_regsets,
+ .n = ARRAY_SIZE(arc_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_arc_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EIO;
+
+ pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+ switch (request) {
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->thr_ptr,
+ (unsigned long __user *)data);
+ break;
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (ptrace_report_syscall_entry(regs))
+ return ULONG_MAX;
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+#endif
+
+ return regs->r8;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ptrace_report_syscall_exit(regs, 0);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+#endif
+}
+
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
+
+bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
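+
+/*
+ * Usage sketch (hypothetical caller): kprobe trace-event fetch args such
+ * as "$stack2" resolve through this helper, e.g.
+ *
+ *	unsigned long val = regs_get_kernel_stack_nth(regs, 2);
+ *
+ * which returns 0 rather than faulting if entry 2 lies past the kernel stack.
+ */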
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 0000000000..fd6c3eb930
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+ /* Halt the processor */
+ __asm__ __volatile__("flag 1\n");
+}
+
+void machine_restart(char *__unused)
+{
+ /* Soft reset : jump to reset vector */
+ pr_info("Put your restart handler here\n");
+ machine_halt();
+}
+
+void machine_power_off(void)
+{
+ /* FIXME :: power off ??? */
+ machine_halt();
+}
+
+void (*pm_power_off) (void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 0000000000..d08a5092c2
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/cpu.h>
+#include <linux/of_clk.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+#include <linux/cache.h>
+#include <uapi/linux/mount.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/asserts.h>
+#include <asm/tlb.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/unwind.h>
+#include <asm/mach_desc.h>
+#include <asm/smp.h>
+#include <asm/dsp-impl.h>
+#include <soc/arc/mcip.h>
+
+#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
+
+unsigned int intr_to_DE_cnt;
+
+/* Part of U-boot ABI: see head.S */
+int __initdata uboot_tag;
+int __initdata uboot_magic;
+char __initdata *uboot_arg;
+
+const struct machine_desc *machine_desc;
+
+struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
+
+struct cpuinfo_arc {
+ int arcver;
+ unsigned int t0:1, t1:1;
+ struct {
+ unsigned long base;
+ unsigned int sz;
+ } iccm, dccm;
+};
+
+#ifdef CONFIG_ISA_ARCV2
+
+static const struct id_to_str arc_hs_rel[] = {
+ /* ID.ARCVER, Release */
+ { 0x51, "R2.0" },
+ { 0x52, "R2.1" },
+ { 0x53, "R3.0" },
+};
+
+static const struct id_to_str arc_hs_ver54_rel[] = {
+ /* UARCH.MAJOR, Release */
+ { 0, "R3.10a"},
+ { 1, "R3.50a"},
+ { 2, "R3.60a"},
+ { 3, "R4.00a"},
+ { 0xFF, NULL }
+};
+#endif
+
+static int
+arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+{
+ int n = 0;
+#ifdef CONFIG_ISA_ARCOMPACT
+ char *cpu_nm, *isa_nm = "ARCompact";
+ struct bcr_fp_arcompact fpu_sp, fpu_dp;
+ int atomic = 0, be, present;
+ int bpu_full, bpu_cache, bpu_pred;
+ struct bcr_bpu_arcompact bpu;
+ struct bcr_iccm_arcompact iccm;
+ struct bcr_dccm_arcompact dccm;
+ struct bcr_generic isa;
+
+ READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
+ if (!isa.ver) /* ISA BCR absent, use Kconfig info */
+ atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+ else {
+ /* ARC700_BUILD only has 2 bits of isa info */
+ atomic = isa.info & 1;
+ }
+
+ be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+ if (info->arcver < 0x34)
+ cpu_nm = "ARC750";
+ else
+ cpu_nm = "ARC770";
+
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
+ c, cpu_nm, isa_nm,
+ IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL1(be, "[Big-Endian]"));
+
+ READ_BCR(ARC_REG_FP_BCR, fpu_sp);
+ READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);
+
+ if (fpu_sp.ver | fpu_dp.ver)
+ n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
+ IS_AVAIL1(fpu_sp.ver, "SP "),
+ IS_AVAIL1(fpu_dp.ver, "DP "));
+
+ READ_BCR(ARC_REG_BPU_BCR, bpu);
+ bpu_full = bpu.fam ? 1 : 0;
+ bpu_cache = 256 << (bpu.ent - 1);
+ bpu_pred = 256 << (bpu.ent - 1);
+
+ n += scnprintf(buf + n, len - n,
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+ IS_AVAIL1(bpu_full, "full"),
+ IS_AVAIL1(!bpu_full, "partial"),
+ bpu_cache, bpu_pred);
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ info->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+ info->iccm.base = iccm.base << 16;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ info->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+
+ base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+ info->dccm.base = base & ~0xF;
+ }
+
+ /* ARCompact ISA specific sanity checks */
+ present = fpu_dp.ver; /* SP has no arch visible regs */
+ CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
+#endif
+ return n;
+}
+
+static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+{
+ int n = 0;
+#ifdef CONFIG_ISA_ARCV2
+ const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
+ int dual_issue = 0, dual_enb = 0, mpy_opt, present;
+ int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
+ char mpy_nm[16], lpb_nm[32];
+ struct bcr_isa_arcv2 isa;
+ struct bcr_mpy mpy;
+ struct bcr_fp_arcv2 fpu;
+ struct bcr_bpu_arcv2 bpu;
+ struct bcr_lpb lpb;
+ struct bcr_iccm_arcv2 iccm;
+ struct bcr_dccm_arcv2 dccm;
+ struct bcr_erp erp;
+
+ /*
+ * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+ * ARCVER 0x54, which introduced AUX MICRO_ARCH_BUILD; subsequent
+ * releases only update that.
+ */
+
+ if (info->arcver > 0x50 && info->arcver <= 0x53) {
+ release = arc_hs_rel[info->arcver - 0x51].str;
+ } else {
+ const struct id_to_str *tbl;
+ struct bcr_uarch_build uarch;
+
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+ for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
+ if (uarch.maj == tbl->id) {
+ release = tbl->str;
+ break;
+ }
+ }
+ if (uarch.prod == 4) {
+ unsigned int exec_ctrl;
+
+ cpu_nm = "HS48";
+ dual_issue = 1;
+ /* if dual issue hardware, is it enabled ? */
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+ dual_enb = !(exec_ctrl & 1);
+ }
+ }
+
+ READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
+ c, cpu_nm, release, isa_nm,
+ IS_AVAIL1(isa.be, "[Big-Endian]"),
+ IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));
+
+ READ_BCR(ARC_REG_MPY_BCR, mpy);
+ mpy_opt = 2; /* stock MPY/MPYH */
+ if (mpy.dsp) /* OPT 7-9 */
+ mpy_opt = mpy.dsp + 6;
+
+ scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);
+
+ READ_BCR(ARC_REG_FP_V2_BCR, fpu);
+
+ n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
+ IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+ IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
+ IS_AVAIL1(mpy.ver, mpy_nm),
+ IS_AVAIL1(isa.div_rem, "div_rem "),
+ IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
+ IS_AVAIL1(fpu.sp, " sp"),
+ IS_AVAIL1(fpu.dp, " dp"));
+
+ READ_BCR(ARC_REG_BPU_BCR, bpu);
+ bpu_full = bpu.ft;
+ bpu_cache = 256 << bpu.bce;
+ bpu_pred = 2048 << bpu.pte;
+ bpu_ret_stk = 4 << bpu.rse;
+
+ READ_BCR(ARC_REG_LPB_BUILD, lpb);
+ if (lpb.ver) {
+ unsigned int ctl;
+ ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+ scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
+ lpb.entries, IS_DISABLED_RUN(!ctl));
+ }
+
+ n += scnprintf(buf + n, len - n,
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
+ IS_AVAIL1(bpu_full, "full"),
+ IS_AVAIL1(!bpu_full, "partial"),
+ bpu_cache, bpu_pred, bpu_ret_stk,
+ lpb_nm);
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ unsigned long base;
+ info->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
+ if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+ info->iccm.sz <<= iccm.sz01;
+ base = read_aux_reg(ARC_REG_AUX_ICCM);
+ info->iccm.base = base & 0xF0000000;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ info->dccm.sz = 256 << dccm.sz0;
+ if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+ info->dccm.sz <<= dccm.sz1;
+ base = read_aux_reg(ARC_REG_AUX_DCCM);
+ info->dccm.base = base & 0xF0000000;
+ }
+
+ /* Error Protection: ECC/Parity */
+ READ_BCR(ARC_REG_ERP_BUILD, erp);
+ if (erp.ver) {
+ struct ctl_erp ctl;
+ READ_BCR(ARC_REG_ERP_CTRL, ctl);
+ /* inverted bits: 0 means enabled */
+ n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+ IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
+ IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
+ IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+ }
+
+ /* ARCv2 ISA specific sanity checks */
+ present = fpu.sp | fpu.dp | mpy.dsp; /* DSP and/or FPU */
+ CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
+
+ dsp_config_check();
+#endif
+ return n;
+}
+
+static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+{
+ struct bcr_identity ident;
+ struct bcr_timer timer;
+ struct bcr_generic bcr;
+ struct mcip_bcr mp;
+ struct bcr_actionpoint ap;
+ unsigned long vec_base;
+ int ap_num, ap_full, smart, rtt, n;
+
+ memset(info, 0, sizeof(struct cpuinfo_arc));
+
+ READ_BCR(AUX_IDENTITY, ident);
+ info->arcver = ident.family;
+
+ n = scnprintf(buf, len,
+ "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
+ ident.family, ident.cpu_id, ident.chip_id);
+
+ if (is_isa_arcompact()) {
+ n += arcompact_mumbojumbo(c, info, buf + n, len - n);
+ } else if (is_isa_arcv2()) {
+ n += arcv2_mumbojumbo(c, info, buf + n, len - n);
+ }
+
+ n += arc_mmu_mumbojumbo(c, buf + n, len - n);
+ n += arc_cache_mumbojumbo(c, buf + n, len - n);
+
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ info->t0 = timer.t0;
+ info->t1 = timer.t1;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+ vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+
+ n += scnprintf(buf + n, len - n,
+ "Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
+ IS_AVAIL1(timer.t0, "Timer0 "),
+ IS_AVAIL1(timer.t1, "Timer1 "),
+ IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
+ IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
+ vec_base);
+
+ READ_BCR(ARC_REG_AP_BCR, ap);
+ if (ap.ver) {
+ ap_num = 2 << ap.num;
+ ap_full = !ap.min;
+ }
+
+ READ_BCR(ARC_REG_SMART_BCR, bcr);
+ smart = bcr.ver ? 1 : 0;
+
+ READ_BCR(ARC_REG_RTT_BCR, bcr);
+ rtt = bcr.ver ? 1 : 0;
+
+ if (ap.ver | smart | rtt) {
+ n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
+ IS_AVAIL1(smart, "smaRT "),
+ IS_AVAIL1(rtt, "RTT "));
+ if (ap.ver) {
+ n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+ ap_num,
+ ap_full ? "full":"min");
+ }
+ n += scnprintf(buf + n, len - n, "\n");
+ }
+
+ if (info->dccm.sz || info->iccm.sz)
+ n += scnprintf(buf + n, len - n,
+ "Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
+ info->dccm.base, TO_KB(info->dccm.sz),
+ info->iccm.base, TO_KB(info->iccm.sz));
+
+ return buf;
+}
+
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (hw_exists && !opt_ena)
+ pr_warn(" ! Enable %s for working apps\n", opt_name);
+ else if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
+/*
+ * ISA agnostic sanity checks
+ */
+static void arc_chk_core_config(struct cpuinfo_arc *info)
+{
+ if (!info->t0)
+ panic("Timer0 is not present!\n");
+
+ if (!info->t1)
+ panic("Timer1 is not present!\n");
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ /*
+ * DCCM can be arbitrarily placed in hardware.
+ * Make sure its placement/size matches what Linux was built with
+ */
+ if ((unsigned int)__arc_dccm_base != info->dccm.base)
+ panic("Linux built with incorrect DCCM Base address\n");
+
+ if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
+ panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
+ panic("Linux built with incorrect ICCM Size\n");
+#endif
+}
+
+/*
+ * Initialize and set up the processor core.
+ * This is called by all the CPUs, thus it should not do special-case stuff
+ * such as work meant only for the boot CPU.
+ */
+
+void setup_processor(void)
+{
+ struct cpuinfo_arc info;
+ int c = smp_processor_id();
+ char str[512];
+
+ pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
+ pr_info("%s", arc_platform_smp_cpuinfo());
+
+ arc_chk_core_config(&info);
+
+ arc_init_IRQ();
+ arc_mmu_init();
+ arc_cache_init();
+}
+
+static inline bool uboot_arg_invalid(unsigned long addr)
+{
+ /*
+ * Check that it is an untranslated address (although the MMU is not enabled
+ * yet, it being a high address ensures this is not by fluke)
+ */
+ if (addr < PAGE_OFFSET)
+ return true;
+
+ /* Check that address doesn't clobber resident kernel image */
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
+}
+
+#define IGNORE_ARGS "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE 0
+#define UBOOT_TAG_CMDLINE 1
+#define UBOOT_TAG_DTB 2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE 0
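+
+/*
+ * For reference, the register-level hand-off (summarized from head.S):
+ *	r0 = uboot_tag   (UBOOT_TAG_NONE / _CMDLINE / _DTB above)
+ *	r1 = uboot_magic (always 0, i.e. UBOOT_MAGIC_VALUE, for ABI rev 0)
+ *	r2 = uboot_arg   (pointer to cmdline string or external DTB)
+ */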
+
+void __init handle_uboot_args(void)
+{
+ bool use_embedded_dtb = true;
+ bool append_cmdline = false;
+
+ /* check that we know this tag */
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_tag != UBOOT_TAG_CMDLINE &&
+ uboot_tag != UBOOT_TAG_DTB) {
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_magic != UBOOT_MAGIC_VALUE) {
+ pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+ goto ignore_uboot_args;
+ }
+
+ /* see if U-boot passed an external Device Tree blob */
+ if (uboot_tag == UBOOT_TAG_DTB) {
+ machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+ /* external Device Tree blob is invalid - use embedded one */
+ use_embedded_dtb = !machine_desc;
+ }
+
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
+ append_cmdline = true;
+
+ignore_uboot_args:
+
+ if (use_embedded_dtb) {
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+ }
+
+ /*
+ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+ * append processing can only happen after.
+ */
+ if (append_cmdline) {
+ /* Ensure a whitespace between the 2 cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
+ }
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ handle_uboot_args();
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ *cmdline_p = boot_command_line;
+
+ /* To force early parsing of things like mem=xxx */
+ parse_early_param();
+
+ /* Platform/board specific: e.g. early console registration */
+ if (machine_desc->init_early)
+ machine_desc->init_early();
+
+ smp_init_cpus();
+
+ setup_processor();
+ setup_arch_memory();
+
+ /* copy flat DT out of .init and then unflatten it */
+ unflatten_and_copy_device_tree();
+
+ /* Can be an issue if someone passes the cmdline arg "ro",
+ * but that is unlikely, so keep it as is
+ */
+ root_mountflags &= ~MS_RDONLY;
+
+ arc_unwind_init();
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ */
+void __init time_init(void)
+{
+ of_clk_init(NULL);
+ timer_probe();
+}
+
+static int __init customize_machine(void)
+{
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
+
+ return 0;
+}
+arch_initcall(customize_machine);
+
+static int __init init_late_machine(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+
+ return 0;
+}
+late_initcall(init_late_machine);
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
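+
+/*
+ * Worked example of the encoding: for cpu-id 2, cpu_to_ptr() yields the
+ * non-NULL cookie 0xFFFF0002, and ptr_to_cpu() masks the 0xFFFF0000 tag
+ * back off to recover 2. See the comment in c_start() below for why.
+ */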
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *str;
+ int cpu_id = ptr_to_cpu(v);
+ struct device *cpu_dev = get_cpu_device(cpu_id);
+ struct cpuinfo_arc info;
+ struct clk *cpu_clk;
+ unsigned long freq = 0;
+
+ if (!cpu_online(cpu_id)) {
+ seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
+ goto done;
+ }
+
+ str = (char *)__get_free_page(GFP_KERNEL);
+ if (!str)
+ goto done;
+
+ seq_printf(m, "%s", arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
+ cpu_id);
+ } else {
+ freq = clk_get_rate(cpu_clk);
+ }
+ if (freq)
+ seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
+ freq / 1000000, (freq / 10000) % 100);
+
+ seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, "%s", arc_platform_smp_cpuinfo());
+
+ free_page((unsigned long)str);
+done:
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * Callback returns cpu-id to iterator for show routine, NULL to stop.
+ * However since NULL is also a valid cpu-id (0), we use a roundabout
+ * way to pass it w/o having to kmalloc/free a 2 byte string.
+ * Encode cpu-id as 0xFFFFcccc (per cpu_to_ptr() above), which is decoded
+ * by the show routine.
+ */
+ return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 0000000000..8f6f4a5429
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() supports TIF_RESTORE_SIGMASK
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard the arch
+ *  implementation
+ * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ * the job to do_signal()
+ *
+ * vineetg: July 2009
+ * -Modified Code to support the uClibc provided userland sigreturn stub
+ * to avoid the kernel synthesizing it on the user stack at runtime, costing
+ * TLB probes and cache line flushes.
+ *
+ * vineetg: July 2009
+ * -In stash_usr_regs() and restore_usr_regs(), save/restore of user regs
+ * is done as a block copy rather than one word at a time.
+ * This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ * - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ * -ViXS were still seeing crashes when using insmod to load drivers.
+ * It turned out that the code to change Execute permissions for TLB entries
+ * of user was not guarded for interrupts (mod_tlb_permission)
+ * This was causing TLB entries to be overwritten on unrelated indexes
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ * -Exception happens in Delay slot of a JMP, and before user space resumes,
+ * Signal is delivered (Ctrl + C) => SIGINT.
+ * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ * to run, but doesn't clear the Delay slot bit from status32. As a result,
+ * on resuming user mode, signal handler branches off to BTA of orig JMP
+ * -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/resume_user_mode.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/ucontext.h>
+#include <asm/entry.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#define MAGIC_SIGALTSTK 0x07302004
+ unsigned int sigret_magic;
+};
+
+static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err;
+ struct user_regs_struct uregs;
+
+ uregs.scratch.bta = regs->bta;
+ uregs.scratch.lp_start = regs->lp_start;
+ uregs.scratch.lp_end = regs->lp_end;
+ uregs.scratch.lp_count = regs->lp_count;
+ uregs.scratch.status32 = regs->status32;
+ uregs.scratch.ret = regs->ret;
+ uregs.scratch.blink = regs->blink;
+ uregs.scratch.fp = regs->fp;
+ uregs.scratch.gp = regs->r26;
+ uregs.scratch.r12 = regs->r12;
+ uregs.scratch.r11 = regs->r11;
+ uregs.scratch.r10 = regs->r10;
+ uregs.scratch.r9 = regs->r9;
+ uregs.scratch.r8 = regs->r8;
+ uregs.scratch.r7 = regs->r7;
+ uregs.scratch.r6 = regs->r6;
+ uregs.scratch.r5 = regs->r5;
+ uregs.scratch.r4 = regs->r4;
+ uregs.scratch.r3 = regs->r3;
+ uregs.scratch.r2 = regs->r2;
+ uregs.scratch.r1 = regs->r1;
+ uregs.scratch.r0 = regs->r0;
+ uregs.scratch.sp = regs->sp;
+
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+ struct user_regs_struct uregs;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ err |= __copy_from_user(&uregs.scratch,
+ &(sf->uc.uc_mcontext.regs.scratch),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
+ if (err)
+ return -EFAULT;
+
+ set_current_blocked(&set);
+ regs->bta = uregs.scratch.bta;
+ regs->lp_start = uregs.scratch.lp_start;
+ regs->lp_end = uregs.scratch.lp_end;
+ regs->lp_count = uregs.scratch.lp_count;
+ regs->status32 = uregs.scratch.status32;
+ regs->ret = uregs.scratch.ret;
+ regs->blink = uregs.scratch.blink;
+ regs->fp = uregs.scratch.fp;
+ regs->r26 = uregs.scratch.gp;
+ regs->r12 = uregs.scratch.r12;
+ regs->r11 = uregs.scratch.r11;
+ regs->r10 = uregs.scratch.r10;
+ regs->r9 = uregs.scratch.r9;
+ regs->r8 = uregs.scratch.r8;
+ regs->r7 = uregs.scratch.r7;
+ regs->r6 = uregs.scratch.r6;
+ regs->r5 = uregs.scratch.r5;
+ regs->r4 = uregs.scratch.r4;
+ regs->r3 = uregs.scratch.r3;
+ regs->r2 = uregs.scratch.r2;
+ regs->r1 = uregs.scratch.r1;
+ regs->r0 = uregs.scratch.r0;
+ regs->sp = uregs.scratch.sp;
+
+ return 0;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+ return magic == MAGIC_SIGALTSTK;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ /* Since we stacked the signal frame on a word boundary,
+ * 'sp' should be word aligned here. If it's not,
+ * the user is trying to mess with us.
+ */
+ if (regs->sp & 3)
+ goto badframe;
+
+ sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+ if (!access_ok(sf, sizeof(*sf)))
+ goto badframe;
+
+ if (__get_user(magic, &sf->sigret_magic))
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
+ if (restore_usr_regs(regs, sf))
+ goto badframe;
+
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+ /*
+ * Ensure that sigreturn always returns to user mode (in case the
+ * regs saved on user stack got fudged between save and sigreturn)
+ * Otherwise it is easy to panic the kernel with a custom
+ * signal handler and/or restorer which clobbers the status32/ret
+ * to return to a bogus location in kernel mode.
+ */
+ regs->status32 |= STATUS_U_MASK;
+
+ return regs->r0;
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+/*
+ * Determine which stack to use.
+ */
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = sigsp(regs->sp, ksig);
+ void __user *frame;
+
+ /* No matter what happens, 'sp' must be word
+ * aligned otherwise nasty things could happen
+ */
+
+ /* ATPCS B01 mandates 8-byte alignment */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /* Check that we can actually write to the signal frame */
+ if (!access_ok(frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+static int
+setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic = 0;
+ int err = 0;
+
+ sf = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
+ if (!sf)
+ return 1;
+
+ /*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+ * inspected by userland: but are they allowed to fiddle with it?)
+ */
+ err |= stash_usr_regs(sf, regs, set);
+
+ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+ * #3: struct ucontext (completely populated)
+ */
+ if (unlikely(ksig->ka.sa.sa_flags & SA_SIGINFO)) {
+ err |= copy_siginfo_to_user(&sf->info, &ksig->info);
+ err |= __put_user(0, &sf->uc.uc_flags);
+ err |= __put_user(NULL, &sf->uc.uc_link);
+ err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+ /* setup args 2 and 3 for user mode handler */
+ regs->r1 = (unsigned long)&sf->info;
+ regs->r2 = (unsigned long)&sf->uc;
+
+ /*
+ * small optimization to avoid unconditionally calling do_sigaltstack
+ * in sigreturn path, now that we only have rt_sigreturn
+ */
+ magic = MAGIC_SIGALTSTK;
+ }
+
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+
+ /* #1 arg to the user Signal handler */
+ regs->r0 = ksig->sig;
+
+ /* setup PC of user space signal handler */
+ regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
+
+ /*
+ * handler returns using the sigreturn stub already provided by userspace;
+ * if not, nuke the process right away
+ */
+ if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
+ return 1;
+
+ regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
+
+ /* User Stack for signal handler will be above the frame just carved */
+ regs->sp = (unsigned long)sf;
+
+ /*
+ * Bug 94183, Clear the DE bit, so that when signal handler
+ * starts to run, it doesn't use BTA
+ */
+ regs->status32 &= ~STATUS_DE_MASK;
+ regs->status32 |= STATUS_L_MASK;
+
+ return err;
+}
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /*
+ * ERESTARTNOHAND means that the syscall should
+ * only be restarted if there was no handler for
+ * the signal, and since we only get here if there
+ * is a handler, we don't restart
+ */
+ regs->r0 = -EINTR; /* ERESTART_xxx is internal */
+ break;
+
+ case -ERESTARTSYS:
+ /*
+ * ERESTARTSYS means to restart the syscall if
+ * there is no handler or the handler was
+ * registered with SA_RESTART
+ */
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ fallthrough;
+
+ case -ERESTARTNOINTR:
+ /*
+ * ERESTARTNOINTR means that the syscall should
+ * be called again after the signal handler returns.
+ * Set up reg state just as it was before doing the trap.
+ * r0 has been clobbered with the syscall ret code, thus it
+ * needs to be reloaded with the orig first arg to the syscall,
+ * kept in orig_r0. Rest of the relevant reg-file:
+ * r8 (syscall num) and (r1 - r7) will be reset to
+ * their orig user space values when we ret from the kernel
+ */
+ regs->r0 = regs->orig_r0;
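+ /* trap_s is a 2 byte insn on ARCv2; the ARCompact trap0 insn is 4 */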
+ regs->ret -= is_isa_arcv2() ? 2 : 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int failed;
+
+ /* Set up the stack frame */
+ failed = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(failed, ksig, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+ int restart_scall;
+
+ restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+ if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
+ if (restart_scall) {
+ arc_restart_syscall(&ksig.ka, regs);
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ if (restart_scall) {
+ /* No handler for syscall: restart it */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ regs->ret -= is_isa_arcv2() ? 2 : 4;
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r8 = __NR_restart_syscall;
+ regs->ret -= is_isa_arcv2() ? 2 : 4;
+ }
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+
+ /* If there's no signal to deliver, restore the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * ASM glue guarantees that this is only called when returning to
+ * user mode
+ */
+ if (test_thread_flag(TIF_NOTIFY_RESUME))
+ resume_user_mode_work(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 0000000000..8d9b188caa
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * RajeshwarR: Dec 11, 2007
+ * -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ * -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/spinlock.h>
+#include <linux/sched/mm.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/reboot.h>
+#include <linux/irqdomain.h>
+#include <linux/export.h>
+#include <linux/of_fdt.h>
+
+#include <asm/mach_desc.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/processor.h>
+
+#ifndef CONFIG_ARC_HAS_LLSC
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
+#endif
+
+struct plat_smp_ops __weak plat_smp_ops;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
+{
+ unsigned long dt_root = of_get_flat_dt_root();
+ const char *buf;
+
+ buf = of_get_flat_dt_prop(dt_root, name, NULL);
+ if (!buf)
+ return -EINVAL;
+
+ if (cpulist_parse(buf, cpumask))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Read from DeviceTree and set up the cpu possible mask. If there is no
+ * "possible-cpus" property in DeviceTree, pretend all [0..NR_CPUS-1] exist.
+ */
+static void __init arc_init_cpu_possible(void)
+{
+ struct cpumask cpumask;
+
+ if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+ pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+ NR_CPUS);
+
+ cpumask_setall(&cpumask);
+ }
+
+ if (!cpumask_test_cpu(0, &cpumask))
+ panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+ init_cpu_possible(&cpumask);
+}
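+
+/*
+ * Illustrative DT snippet (hypothetical values) consumed above; the property
+ * is a standard cpulist string handed to cpulist_parse():
+ *
+ *	/ {
+ *		possible-cpus = "0-3";
+ *	};
+ */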
+
+/*
+ * Called from setup_arch() before calling setup_processor()
+ *
+ * - Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ * - Call early smp init hook. This can initialize a specific multi-core
+ * IP which is, say, common to several platforms (hence not part of the
+ * platform specific init_early() hook)
+ */
+void __init smp_init_cpus(void)
+{
+ arc_init_cpu_possible();
+
+ if (plat_smp_ops.init_early_smp)
+ plat_smp_ops.init_early_smp();
+}
+
+/* called from init() => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ /*
+ * If the platform didn't set the present map already, do it now;
+ * the boot cpu is already set present by init/main.c
+ */
+ if (num_present_cpus() <= 1)
+ init_cpu_present(cpu_possible_mask);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * Default smp boot helper for the Run-on-reset case, where all cores start
+ * off together. Non-masters need to wait for the Master to start running.
+ * This is implemented using a flag in memory, which Non-masters spin-wait on.
+ * The Master sets it to the cpu-id of the core to "ungate" it.
+ */
+static volatile int wake_flag;
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
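+
+/*
+ * Hand-shake sketch: the master writes the target cpu-id into @wake_flag
+ * (arc_default_smp_cpu_kick() below); the secondary spins in
+ * arc_platform_smp_wait_to_boot() until it reads its own id, then writes 0
+ * back as the ack. The uncached accessors matter on ARCv2 since the flag is
+ * touched before the secondary's caches are set up.
+ */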
+
+static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
+{
+ BUG_ON(cpu == 0);
+
+ __boot_write(wake_flag, cpu);
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+ /* for halt-on-reset, we've waited already */
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+ while (__boot_read(wake_flag) != cpu)
+ ;
+
+ __boot_write(wake_flag, 0);
+}
+
+const char *arc_platform_smp_cpuinfo(void)
+{
+ return plat_smp_ops.info ? : "";
+}
+
+/*
+ * The very first "C" code executed by a secondary.
+ * Called from the asm stub in head.S.
+ * "current"/R25 already set up by low level boot code
+ */
+void start_kernel_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* MMU, Caches, Vector Table, Interrupts etc */
+ setup_processor();
+
+ mmget(mm);
+ mmgrab(mm);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+ /* Some SMP H/w setup - for each cpu */
+ if (plat_smp_ops.init_per_cpu)
+ plat_smp_ops.init_per_cpu(cpu);
+
+ if (machine_desc->init_per_cpu)
+ machine_desc->init_per_cpu(cpu);
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+ local_irq_enable();
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+/*
+ * Called from kernel_init() -> smp_init() - for each CPU
+ *
+ * At this point, the Secondary Processor is "HALT"ed: either
+ * -it booted, but was halted in head.S, or
+ * -it was configured to halt-on-reset,
+ * so we need to wake it up.
+ *
+ * The essential requirements are where to run from (PC) and the stack (SP).
+ */
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long wait_till;
+
+ secondary_idle_tsk = idle;
+
+ pr_info("Idle Task [%d] %p", cpu, idle);
+ pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+ if (plat_smp_ops.cpu_kick)
+ plat_smp_ops.cpu_kick(cpu,
+ (unsigned long)first_lines_of_secondary);
+ else
+ arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
+
+ /* wait for 1 sec after kicking the secondary */
+ wait_till = jiffies + HZ;
+ while (time_before(jiffies, wait_till)) {
+ if (cpu_online(cpu))
+ break;
+ }
+
+ if (!cpu_online(cpu)) {
+ pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
+ return -1;
+ }
+
+ secondary_idle_tsk = NULL;
+
+ return 0;
+}
+
+/*****************************************************************************/
+/* Inter Processor Interrupt Handling */
+/*****************************************************************************/
+
+enum ipi_msg_type {
+ IPI_EMPTY = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+};
+
+/*
+ * In arches with an IRQ per msg type (above), the receiver can use the IRQ-id
+ * to figure out what msg was sent. For those which don't (ARC has a dedicated
+ * IPI IRQ), the msg-type needs to be conveyed via per-cpu data
+ */
+
+static DEFINE_PER_CPU(unsigned long, ipi_data);
+
+static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
+{
+ unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
+ unsigned long old, new;
+ unsigned long flags;
+
+ pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
+
+ local_irq_save(flags);
+
+ /*
+ * Atomically write new msg bit (in case others are writing too),
+ * and read back old value
+ */
+ do {
+ new = old = *ipi_data_ptr;
+ new |= 1U << msg;
+ } while (cmpxchg(ipi_data_ptr, old, new) != old);
+
+ /*
+ * Call the platform specific IPI kick function, but avoid if possible:
+ * Only do so if there's no pending msg from other concurrent sender(s).
+ * Otherwise, receiver will see this msg as well when it takes the
+ * IPI corresponding to that msg. This is true, even if it is already in
+ * IPI handler, because !@old means it has not yet dequeued the msg(s)
+ * so @new msg can be a free-loader
+ */
+ if (plat_smp_ops.ipi_send && !old)
+ plat_smp_ops.ipi_send(cpu);
+
+ local_irq_restore(flags);
+}
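+
+/*
+ * Concrete example of the IPI elision above: CPU 1 sends IPI_RESCHEDULE to
+ * CPU 2 (old == 0, so the hardware IPI is kicked); before CPU 2 dequeues it,
+ * CPU 3 sends IPI_CALL_FUNC to CPU 2 (old != 0, so no second kick is needed).
+ * CPU 2's single do_IPI() invocation then xchg()es both pending bits out and
+ * services the two messages in one pass.
+ */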
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, callmap)
+ ipi_send_msg_one(cpu, msg);
+}
+
+void arch_smp_send_reschedule(int cpu)
+{
+ ipi_send_msg_one(cpu, IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send_msg_one(cpu, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(void)
+{
+ machine_halt();
+}
+
+static inline int __do_IPI(unsigned long msg)
+{
+ int rc = 0;
+
+ switch (msg) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop();
+ break;
+
+ default:
+ rc = 1;
+ }
+
+ return rc;
+}
+
+/*
+ * arch-common ISR to handle for inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+static irqreturn_t do_IPI(int irq, void *dev_id)
+{
+ unsigned long pending;
+ unsigned long __maybe_unused copy;
+
+ pr_debug("IPI [%ld] received on cpu %d\n",
+ *this_cpu_ptr(&ipi_data), smp_processor_id());
+
+ if (plat_smp_ops.ipi_clear)
+ plat_smp_ops.ipi_clear(irq);
+
+ /*
+ * "dequeue" the msg corresponding to this IPI (and possibly other
+ * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
+ */
+ copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);
+
+ do {
+ unsigned long msg = __ffs(pending);
+ int rc;
+
+ rc = __do_IPI(msg);
+ if (rc)
+ pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
+ pending &= ~(1U << msg);
+ } while (pending);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
+ * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * request_percpu_irq() below will fail
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
+{
+ int *dev = per_cpu_ptr(&ipi_dev, cpu);
+ unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+ if (!virq)
+ panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
+
+ /* Boot cpu calls request, all call enable */
+ if (!cpu) {
+ int rc;
+
+ rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
+ if (rc)
+ panic("Percpu IRQ request failed for %u\n", virq);
+ }
+
+ enable_percpu_irq(virq, 0);
+
+ return 0;
+}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 0000000000..ea99c066ef
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * stacktrace.c : stacktracing APIs needed by rest of kernel
+ * (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: aug 2009
+ * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ * for displaying task's kernel mode call stack in /proc/<pid>/stack
+ * -Iterator based approach to have single copy of unwinding core and APIs
+ * needing unwinding, implement the logic in iterator regarding:
+ * = which frame onwards to start capture
+ * = which frame to stop capturing (wchan)
+ * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ * vineetg: March 2009
+ * -Implemented correct versions of thread_saved_pc() and __get_wchan()
+ *
+ * rajeshwarr: 2008
+ * -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <linux/sched/debug.h>
+
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ * Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
+{
+ if (regs) {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ } else if (tsk == NULL || tsk == current) {
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
+ unsigned long fp, sp, blink, ret;
+ frame_info->task = current;
+
+ __asm__ __volatile__(
+ "mov %0,r27\n\t"
+ "mov %1,r28\n\t"
+ "mov %2,r31\n\t"
+ "mov %3,r63\n\t"
+ : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+ );
+
+ frame_info->regs.r27 = fp;
+ frame_info->regs.r28 = sp;
+ frame_info->regs.r31 = blink;
+ frame_info->regs.r63 = ret;
+ frame_info->call_frame = 0;
+ } else {
+ /*
+ * Asynchronous unwinding of a likely sleeping task
+ * - first ensure it is actually sleeping
+ * - if so, it will be in __switch_to, kernel mode SP of task
+ * is safe-kept and BLINK at a well known location in there
+ */
+
+ if (task_is_running(tsk))
+ return -1;
+
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = TSK_K_FP(tsk);
+ frame_info->regs.r28 = TSK_K_ESP(tsk);
+ frame_info->regs.r31 = TSK_K_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+ /* In the prologue of __switch_to, first FP is saved on stack
+ * and then SP is copied to FP. Dwarf assumes cfa as FP based
+ * but we didn't save FP. The value retrieved above is FP's
+ * state in previous frame.
+ * As a workaround for this, we unwind from the start of __switch_to
+ * and adjust SP accordingly. The other limitation is that dwarf
+ * rules are not generated for the inline assembly in the
+ * __switch_to macro
+ */
+ frame_info->regs.r27 = 0;
+ frame_info->regs.r28 += 60;
+ frame_info->call_frame = 0;
+
+ }
+ return 0;
+}
+
+#endif
+
+notrace noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int ret = 0, cnt = 0;
+ unsigned int address;
+ struct unwind_frame_info frame_info;
+
+ if (seed_unwind_frame_info(tsk, regs, &frame_info))
+ return 0;
+
+ while (1) {
+ address = UNW_PC(&frame_info);
+
+ if (!address || !__kernel_text_address(address))
+ break;
+
+ if (consumer_fn(address, arg) == -1)
+ break;
+
+ ret = arc_unwind(&frame_info);
+ if (ret)
+ break;
+
+ frame_info.regs.r63 = frame_info.regs.r31;
+
+ if (cnt++ > 128) {
+ printk("unwinder looping too long, aborting !\n");
+ return 0;
+ }
+ }
+
+ return address; /* return the last address it saw */
+#else
+ /* On ARC, only the DWARF based unwinder works. fp based backtracing is
+ * not possible (even with -fno-omit-frame-pointer) because of the way the
+ * function prologue is set up (callee regs are saved first and only then
+ * is fp set, not the other way around)
+ */
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *arg)
+{
+ const char *loglvl = arg;
+
+ printk("%s %pS\n", loglvl, (void *)address);
+ return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (in_sched_functions(address))
+ return 0;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+ if (in_sched_functions(address))
+ return 0;
+
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl)
+{
+ printk("%s\nStack Trace:\n", loglvl);
+ arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+{
+ show_stacktrace(tsk, NULL, loglvl);
+}
+
+/* Another API expected by the scheduler, shows up in "ps" as Wait Channel.
+ * Of course just returning schedule() would be pointless, so unwind until
+ * the function is not in scheduler code
+ */
+unsigned int __get_wchan(struct task_struct *tsk)
+{
+ return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ /* Assumes @tsk is sleeping so unwinds from __switch_to */
+ arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ /* Pass NULL for task so it unwinds the current call frame */
+ arc_unwind_core(NULL, NULL, __collect_all, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 0000000000..1069446bdc
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone sys_clone_wrapper
+#define sys_clone3 sys_clone3_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
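+/*
+ * The designated-initializer trick below: every slot defaults to
+ * sys_ni_syscall, then the __SYSCALL() entries pulled in from
+ * <asm/unistd.h> override the implemented ones, conceptually:
+ *
+ *	__SYSCALL(__NR_read, sys_read)  =>  [__NR_read] = (sys_read),
+ */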
+void *sys_call_table[NR_syscalls] = {
+ [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 0000000000..9b9570b793
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * vineetg: May 2011
+ * -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched/signal.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <linux/kprobes.h>
+#include <linux/kgdb.h>
+#include <asm/entry.h>
+#include <asm/setup.h>
+#include <asm/unaligned.h>
+#include <asm/kprobes.h>
+
+void die(const char *str, struct pt_regs *regs, unsigned long address)
+{
+ show_kernel_fault_diag(str, regs, address);
+
+ /* DEAD END */
+ __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ * -for user faults enqueues requested signal
+ * -for kernel, check if due to copy_(to|from)_user, otherwise die()
+ */
+static noinline int
+unhandled_exception(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr)
+{
+ if (user_mode(regs)) {
+ struct task_struct *tsk = current;
+
+ tsk->thread.fault_address = (__force unsigned int)addr;
+
+ force_sig_fault(signo, si_code, addr);
+
+ } else {
+ /* If not due to copy_(to|from)_user, we are doomed */
+ if (fixup_exception(regs))
+ return 0;
+
+ die(str, regs, (unsigned long)addr);
+ }
+
+ return 1;
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long address, struct pt_regs *regs) \
+{ \
+ return unhandled_exception(str, regs, signr, sicode, \
+ (void __user *)address); \
+}
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
+DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
+
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+int do_misaligned_access(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* If emulation not enabled, or failed, kill the task */
+ if (misaligned_fixup(address, regs, cregs) != 0)
+ return do_misaligned_error(address, regs);
+
+ return 0;
+}
+
+/*
+ * Entry point for miscellaneous errors such as Nested Exceptions
+ * -Duplicate TLB entry is handled separately though
+ */
+void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
+{
+ die("Unhandled Machine Check Exception", regs, address);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is the same family as the TRAP0/SWI insn (they use the same vector).
+ * The only difference is that the SWI insn takes no operand, while TRAP_S
+ * does, which is reflected in the ECR Reg as an 8 bit param.
+ * Thus TRAP_S <n> can be used for specific purposes:
+ * -1 used for software breakpointing (gdb)
+ * -2 used by kprobes
+ * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggles such as
+ * -fno-isolate-erroneous-paths-dereference
+ */
+void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
+{
+ switch (regs->ecr.param) {
+ case 1:
+ trap_is_brkpt(address, regs);
+ break;
+
+ case 2:
+ trap_is_kprobe(address, regs);
+ break;
+
+ case 3:
+ case 4:
+ kgdb_trap(regs);
+ break;
+
+ case 5:
+ do_trap5_error(address, regs);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ * -For a corner case, ARC kprobes implementation resorts to using
+ * this exception, hence the check
+ */
+void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
+{
+ int rc;
+
+ /* Check if this exception is caused by kprobes */
+ rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL);
+ if (rc == NOTIFY_STOP)
+ return;
+
+ insterror_is_error(address, regs);
+}
+
+/*
+ * abort() call generated by older gcc for __builtin_trap()
+ */
+void abort(void)
+{
+ __asm__ __volatile__("trap_s 5\n");
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 0000000000..d5b3ed2c58
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+#define ARC_PATH_MAX 256
+
+static noinline void print_regs_scratch(struct pt_regs *regs)
+{
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
+
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
+}
+
+static void print_regs_callee(struct callee_regs *regs)
+{
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
+}
+
+static void print_task_path_n_nm(struct task_struct *tsk)
+{
+ char *path_nm = NULL;
+ struct mm_struct *mm;
+ struct file *exe_file;
+ char buf[ARC_PATH_MAX];
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ mmput(mm);
+
+ if (exe_file) {
+ path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
+ fput(exe_file);
+ }
+
+done:
+ pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
+}
+
+static void show_faulting_vma(unsigned long address)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *active_mm = current->active_mm;
+
+	/* can't use print_vma_addr() yet as it doesn't check whether the
+	 * returned vma actually contains the address
+	 */
+ mmap_read_lock(active_mm);
+ vma = vma_lookup(active_mm, address);
+
+	/* Report the VMA containing the address, or say that none was found */
+ if (vma) {
+ char buf[ARC_PATH_MAX];
+ char *nm = "?";
+
+ if (vma->vm_file) {
+ nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
+ if (IS_ERR(nm))
+ nm = "?";
+ }
+ pr_info(" @off 0x%lx in [%s] VMA: 0x%08lx to 0x%08lx\n",
+ vma->vm_start < TASK_UNMAPPED_BASE ?
+ address : address - vma->vm_start,
+ nm, vma->vm_start, vma->vm_end);
+ } else
+ pr_info(" @No matching VMA found\n");
+
+ mmap_read_unlock(active_mm);
+}
+
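+
+/*
+ * Decode the Exception Cause Register: regs->ecr packs a vector number
+ * (ecr.vec), a cause code (ecr.cause) and an 8-bit parameter (ecr.param),
+ * which the checks below use to pretty print the fault
+ */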
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+ unsigned int vec, cause_code;
+ unsigned long address;
+
+ /* For Data fault, this is data address not instruction addr */
+ address = current->thread.fault_address;
+
+ vec = regs->ecr.vec;
+ cause_code = regs->ecr.cause;
+
+ /* For DTLB Miss or ProtV, display the memory involved too */
+ if (vec == ECR_V_DTLB_MISS) {
+ pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
+ (cause_code == 0x01) ? "Read" :
+ ((cause_code == 0x02) ? "Write" : "EX"),
+ address, (void *)regs->ret);
+ } else if (vec == ECR_V_ITLB_MISS) {
+ pr_cont("Insn could not be fetched\n");
+ } else if (vec == ECR_V_MACH_CHK) {
+ pr_cont("Machine Check (%s)\n", (cause_code == 0x0) ?
+ "Double Fault" : "Other Fatal Err");
+
+ } else if (vec == ECR_V_PROTV) {
+ if (cause_code == ECR_C_PROTV_INST_FETCH)
+ pr_cont("Execute from Non-exec Page\n");
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+ IS_ENABLED(CONFIG_ISA_ARCOMPACT))
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+ else
+ pr_cont("%s access not allowed on page\n",
+ (cause_code == 0x01) ? "Read" :
+ ((cause_code == 0x02) ? "Write" : "EX"));
+ } else if (vec == ECR_V_INSN_ERR) {
+ pr_cont("Illegal Insn\n");
+#ifdef CONFIG_ISA_ARCV2
+ } else if (vec == ECR_V_MEM_ERR) {
+ if (cause_code == 0x00)
+ pr_cont("Bus Error from Insn Mem\n");
+ else if (cause_code == 0x10)
+ pr_cont("Bus Error from Data Mem\n");
+ else
+ pr_cont("Bus Error, check PRM\n");
+ } else if (vec == ECR_V_MISALIGN) {
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+#endif
+ } else if (vec == ECR_V_TRAP) {
+ if (regs->ecr.param == 5)
+ pr_cont("gcc generated __builtin_trap\n");
+ } else {
+ pr_cont("Check Programmer's Manual\n");
+ }
+}
+
+/************************************************************************
+ * API called by rest of kernel
+ ***********************************************************************/
+
+void show_regs(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
+
+ /*
+ * generic code calls us with preemption disabled, but some calls
+ * here could sleep, so re-enable to avoid lockdep splat
+ */
+ preempt_enable();
+
+ print_task_path_n_nm(tsk);
+ show_regs_print_info(KERN_INFO);
+
+ show_ecr_verbose(regs);
+
+ if (user_mode(regs))
+ show_faulting_vma(regs->ret); /* faulting code, not data */
+
+ pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+ regs->ecr.full, current->thread.fault_address, regs->ret);
+
+ pr_info("STAT32: 0x%08lx", regs->status32);
+
+#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
+
+#ifdef CONFIG_ISA_ARCOMPACT
+ pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
+ (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
+ STS_BIT(regs, DE), STS_BIT(regs, AE),
+ STS_BIT(regs, A2), STS_BIT(regs, A1),
+ STS_BIT(regs, E2), STS_BIT(regs, E1));
+#else
+ pr_cont(" [%2s%2s%2s%2s] ",
+ STS_BIT(regs, IE),
+ (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
+ STS_BIT(regs, DE), STS_BIT(regs, AE));
+#endif
+
+ print_regs_scratch(regs);
+ if (cregs)
+ print_regs_callee(cregs);
+
+ preempt_disable();
+}
+
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address)
+{
+ current->thread.fault_address = address;
+
+ /* Show fault description */
+ pr_info("\n%s\n", str);
+
+ /* Caller and Callee regs */
+ show_regs(regs);
+
+ /* Show stack trace if this Fatality happened in kernel mode */
+ if (!user_mode(regs))
+ show_stacktrace(current, regs, KERN_DEFAULT);
+}
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 0000000000..99a9b92ed9
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * vineetg : May 2011
+ * -Adapted (from .26 to .35)
+ * -original contribution by Tim.yao@amlogic.com
+ */
+
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define BE 1
+#define FIRST_BYTE_16 "swap %1, %1\n swape %1, %1\n"
+#define FIRST_BYTE_32 "swape %1, %1\n"
+#else
+#define BE 0
+#define FIRST_BYTE_16
+#define FIRST_BYTE_32
+#endif
+
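+
+/*
+ * Byte accessors with exception-table fixups: if the load at label 1:
+ * faults, the fault handler vectors to the .fixup stub at 3:, which sets
+ * err and resumes at 2:. The put* variants below use the same scheme.
+ */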
+#define __get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.ab %1, [%2, 1]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, 1\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << ((BE) ? 8 : 0); \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << ((BE) ? 0 : 8); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << ((BE) ? 24 : 0); \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << ((BE) ? 16 : 8); \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << ((BE) ? 8 : 16); \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << ((BE) ? 0 : 24); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ FIRST_BYTE_16 \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb %1, [%2]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: mov %0, 1\n" \
+ " j 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ FIRST_BYTE_32 \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "3: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "4: stb %1, [%2]\n" \
+ "5:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "6: mov %0, 1\n" \
+ " j 5b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
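+/*
+ * Both knobs are runtime tunable: e.g. the
+ * /proc/sys/kernel/ignore-unaligned-usertrap file named in the warning
+ * below maps onto no_unaligned_warning
+ */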
+
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ int val;
+
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+ if (state->aa == 2)
+ state->src2 = 0;
+ }
+
+ if (state->zz == 0) {
+ get32_unaligned_check(val, state->src1 + state->src2);
+ } else {
+ get16_unaligned_check(val, state->src1 + state->src2);
+
+ if (state->x)
+ val = (val << 16) >> 16;
+ }
+
+ if (state->pref == 0)
+ set_reg(state->dest, val, regs, cregs);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+		/* .ab is post-update: the access itself excludes the offset
+		 * (mirrors the aa == 2 handling in fixup_load above)
+		 */
+		if (state->aa == 2)
+			state->src3 = 0;
+ } else if (state->aa == 3) {
+ if (state->zz == 2) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+ regs, cregs);
+ } else if (!state->zz) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+ regs, cregs);
+ } else {
+ goto fault;
+ }
+ }
+
+ /* write fix-up */
+ if (!state->zz)
+ put32_unaligned_check(state->src1, state->src2 + state->src3);
+ else
+ put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ struct disasm_state state;
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+ if (!user_mode(regs) || !unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
+		pr_warn_once("%s(%d) made unaligned access which was emulated"
+			     " by kernel assist.\n This can degrade application"
+			     " performance significantly.\n To enable further"
+			     " logging of such instances, please\n"
+			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+ get_task_comm(buf, current), task_pid_nr(current));
+ } else {
+ /* Add rate limiting if it gets down to it */
+ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+ get_task_comm(buf, current), task_pid_nr(current),
+ address, regs->ret);
+
+ }
+
+ disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* ldb/stb should not have unaligned exception */
+ if ((state.zz == 1) || (state.di))
+ goto fault;
+
+ if (!state.write)
+ fixup_load(&state, regs, cregs);
+ else
+ fixup_store(&state, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* clear any remnants of delay slot */
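+	/*
+	 * i.e. if the insn sat in a branch delay slot, resume at the branch
+	 * target (BTA, bit 0 masked off) and clear STATUS32.DE
+	 */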
+ if (delay_mode(regs)) {
+ regs->ret = regs->bta & ~1U;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+
+ /* handle zero-overhead-loop */
+ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+ regs->ret = regs->lp_start;
+ regs->lp_count--;
+ }
+ }
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
+ return 0;
+
+fault:
+ pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+ state.words[0], address);
+
+ return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 0000000000..9270d0a713
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...) \
+do { \
+ if (dbg_unw) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % sizeof_field(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / sizeof_field(struct unwind_frame_info, f), \
+ sizeof_field(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+UNW_REGISTER_INFO};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
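+
+/*
+ * An encoding byte combines a form (low bits) with an adjustment (high bits),
+ * e.g. 0x1b = DW_EH_PE_pcrel | DW_EH_PE_signed | DW_EH_PE_data4, a common
+ * gcc choice for 32-bit .eh_frame entries
+ */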
+
+#define CIE_ID 0
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+ void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (rather than choosing between allocators at
+ * each call site) to elide section mismatch warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+ return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
+}
+
+static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+ const void *table_start, unsigned long table_size,
+ const u8 *header_start, unsigned long header_size)
+{
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+	/* Avoid pointer arithmetic on a NULL header pointer */
+ if (header_start != NULL) {
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1])
+ != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+ }
+ table->hdrsz = header_size;
+ smp_wmb();
+ table->header = header_start;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init arc_unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind,
+ NULL, 0);
+ /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+ init_unwind_hdr(&root_table, unw_hdr_alloc_early);
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static const u32 *__cie_for_fde(const u32 *fde);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+ const struct eh_frame_hdr_table_entry *e1 = p1;
+ const struct eh_frame_hdr_table_entry *e2 = p2;
+
+ return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+ struct eh_frame_hdr_table_entry *e1 = p1;
+ struct eh_frame_hdr_table_entry *e2 = p2;
+
+ swap(e1->start, e2->start);
+ swap(e1->fde, e2->fde);
+}
+
+static void init_unwind_hdr(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
+{
+ const u8 *ptr;
+ unsigned long tableSize = table->size, hdrSize;
+ unsigned int n;
+ const u32 *fde;
+ struct {
+ u8 version;
+ u8 eh_frame_ptr_enc;
+ u8 fde_count_enc;
+ u8 table_enc;
+ unsigned long eh_frame_ptr;
+ unsigned int fde_count;
+ struct eh_frame_hdr_table_entry table[];
+ } __attribute__ ((__packed__)) *header;
+
+ if (table->header)
+ return;
+
+ if (table->hdrsz)
+ pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+ table->name);
+
+ if (tableSize & (sizeof(*fde) - 1))
+ return;
+
+ for (fde = table->address, n = 0;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = cie_for_fde(fde, table);
+ signed ptrType;
+
+ if (cie == &not_fde)
+ continue;
+ if (cie == NULL || cie == &bad_cie)
+ goto ret_err;
+ ptrType = fde_pointer_type(cie);
+ if (ptrType < 0)
+ goto ret_err;
+
+ ptr = (const u8 *)(fde + 2);
+ if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+ ptrType)) {
+ /* FIXME_Rajesh We have 4 instances of null addresses
+ * instead of the initial loc addr
+ * return;
+ */
+ WARN(1, "unwinder: FDE->initial_location NULL %p\n",
+ (const u8 *)(fde + 1) + *fde);
+ }
+ ++n;
+ }
+
+ if (tableSize || !n)
+ goto ret_err;
+
+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ + 2 * n * sizeof(unsigned long);
+
+ header = alloc(hdrSize);
+ if (!header)
+ goto ret_err;
+
+ header->version = 1;
+ header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+ header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+ % __alignof(typeof(header->fde_count)));
+ header->fde_count = n;
+
+ BUILD_BUG_ON(offsetof(typeof(*header), table)
+ % __alignof(typeof(*header->table)));
+ for (fde = table->address, tableSize = table->size, n = 0;
+ tableSize;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = __cie_for_fde(fde);
+
+ if (fde[1] == CIE_ID)
+ continue; /* this is a CIE */
+ ptr = (const u8 *)(fde + 2);
+ header->table[n].start = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde,
+ fde_pointer_type(cie));
+ header->table[n].fde = (unsigned long)fde;
+ ++n;
+ }
+ WARN_ON(n != header->fde_count);
+
+ sort(header->table,
+ n,
+ sizeof(*header->table),
+ cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+ table->hdrsz = hdrSize;
+ smp_wmb();
+ table->header = (const void *)header;
+ return;
+
+ret_err:
+ panic("Attention !!! Dwarf FDE parsing errors\n");
+}
+
+#ifdef CONFIG_MODULES
+static void *unw_hdr_alloc(unsigned long sz)
+{
+ return kmalloc(sz, GFP_KERNEL);
+}
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+ struct module_memory *core_text;
+ struct module_memory *init_text;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ core_text = &module->mem[MOD_TEXT];
+ init_text = &module->mem[MOD_INIT_TEXT];
+
+ init_unwind_table(table, module->name, core_text->base, core_text->size,
+ init_text->base, init_text->size, table_start, table_size, NULL, 0);
+
+ init_unwind_hdr(table, unw_hdr_alloc);
+
+#ifdef UNWIND_DEBUG
+ unw_debug("Table added for [%s] %lx %lx\n",
+ module->name, table->core.pc, table->core.range);
+#endif
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info {
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table;
+ prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+
+ unlink_table(&info); /* XXX: SMP */
+ kfree(table->header);
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
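+
+/*
+ * LEB128 decoders: 7 payload bits per byte, least significant group first;
+ * bit 7 set means another byte follows. E.g. the DWARF spec sample bytes
+ * 0xe5 0x8e 0x26 decode to 0x65 | (0x0e << 7) | (0x26 << 14) = 624485.
+ */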
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned int shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned int shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static const u32 *__cie_for_fde(const u32 *fde)
+{
+ const u32 *cie;
+
+ cie = fde + 1 - fde[1] / sizeof(*fde);
+
+ return cie;
+}
+
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+ const u32 *cie;
+
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ return &bad_cie;
+
+ if (fde[1] == CIE_ID)
+ return &not_fde; /* this is a CIE */
+
+ if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+ return NULL; /* this is not a valid FDE */
+
+ cie = __cie_for_fde(fde);
+
+ if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || (cie[1] != CIE_ID))
+ return NULL; /* this is not a (valid) CIE */
+ return cie;
+}
+
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch (ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned((u16 *) ptr.p16s++);
+ else
+ value = get_unaligned((u16 *) ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+ fallthrough;
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned((unsigned long *)ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch (ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long __user *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned int version = *ptr;
+
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+
+ /* check if augmentation string is nul-terminated */
+ aug = (const void *)ptr;
+ ptr = memchr(aug, 0, end - ptr);
+ if (ptr == NULL)
+ return -1;
+
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch (*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P':{
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType)
+ || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native | DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+ return delta > 0;
+ */
+ unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+ return 1;
+}
+
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("r%lu: ", reg);
+ switch (where) {
+ case Nowhere:
+ unw_debug("s ");
+ break;
+ case Memory:
+ unw_debug("c(%lu) ", value);
+ break;
+ case Register:
+ unw_debug("r(%lu) ", value);
+ break;
+ case Value:
+ unw_debug("v(%lu) ", value);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+}
+
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+ signed ptrType, struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+ u8 opcode;
+
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result =
+ processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+ state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
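+	/*
+	 * Top two bits of each CFI byte select primary opcodes with an
+	 * embedded operand: 1 = advance_loc, 2 = offset, 3 = restore;
+	 * 0 means an extended opcode follows in the low 6 bits
+	 */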
+ for (ptr.p8 = start; result && ptr.p8 < end;) {
+ switch (*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ opcode = *ptr.p8++;
+
+ switch (opcode) {
+ case DW_CFA_nop:
+ unw_debug("cfa nop ");
+ break;
+ case DW_CFA_set_loc:
+ state->loc = read_pointer(&ptr.p8, end,
+ ptrType);
+ if (state->loc == 0)
+ result = 0;
+ unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+ break;
+ case DW_CFA_advance_loc1:
+ unw_debug("\ncfa advance loc1:");
+ result = ptr.p8 < end
+ && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ value = *ptr.p8++;
+ value += *ptr.p8++ << 8;
+ unw_debug("\ncfa advance loc2:");
+ result = ptr.p8 <= end + 2
+ /* && advance_loc(*ptr.p16++, state); */
+ && advance_loc(value, state);
+ break;
+ case DW_CFA_advance_loc4:
+ unw_debug("\ncfa advance loc4:");
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_offset_extended: ");
+ set_rule(value, Memory,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ unw_debug("cfa_restore_extended: ");
+ case DW_CFA_undefined:
+ unw_debug("cfa_undefined: ");
+ case DW_CFA_same_value:
+ unw_debug("cfa_same_value: ");
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+ state);
+ break;
+ case DW_CFA_register:
+ unw_debug("cfa_register: ");
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ unw_debug("cfa_remember_state: ");
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ unw_debug("cfa_restore_state: ");
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label =
+ state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA,
+ sizeof(state->cfa));
+ memset(state->regs, 0,
+ sizeof(state->regs));
+ state->stackDepth = 0;
+ result =
+ processCFI(start, end, 0, ptrType,
+ state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+ fallthrough;
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa_offset: 0x%lx ",
+ state->cfa.offs);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ fallthrough;
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+ unw_debug("cfa_def_cfa_register: ");
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t) 0 - get_uleb128(&ptr.p8,
+ end),
+ state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+ unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ unw_debug("\ncfa_adv_loc: ");
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ unw_debug("cfa_offset: ");
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+ state);
+ break;
+ case 3:
+ unw_debug("cfa_restore: ");
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result && ptr.p8 == end && (targetLoc == 0 || (
+ /*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc && */ state->label == NULL));
+}
+
+/* Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned int i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+ unsigned long *fptr;
+ unsigned long addr;
+
+ unw_debug("\n\nUNWIND FRAME:\n");
+	unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%lx\n",
+ UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+ UNW_FP(frame));
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+ {
+ unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+ unw_debug("\nStack Dump:\n");
+ for (i = 0; i < 20; i++, sptr++)
+ unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
+ unw_debug("\n");
+ }
+#endif
+
+ table = find_table(pc);
+ if (table != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native:
+ tableSize = sizeof(unsigned long);
+ break;
+ case DW_EH_PE_data2:
+ tableSize = 2;
+ break;
+ case DW_EH_PE_data4:
+ tableSize = 4;
+ break;
+ case DW_EH_PE_data8:
+ tableSize = 8;
+ break;
+ default:
+ tableSize = 0;
+ break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize && read_pointer(&ptr, end, hdr[1])
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2])) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
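+				/*
+				 * Binary search the sorted .eh_frame_hdr
+				 * lookup table ({start, fde} pairs) for the
+				 * FDE covering pc
+				 */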
+ do {
+ const u8 *cur =
+ ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3]);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3])) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr +
+ tableSize,
+ hdr[3]);
+ }
+ }
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= endLoc) {
+ fde = NULL;
+ cie = NULL;
+ }
+ } else {
+ fde = NULL;
+ cie = NULL;
+ }
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if (*++ptr) {
+ /* check if augmentation size is first (thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+				/* check for ignorable or already handled
+				 * entries in the nul-terminated augmentation
+				 * string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+ /* get code alignment factor */
+ state.codeAlign = get_uleb128(&ptr, end);
+ /* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg =
+ state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+ end);
+ unw_debug("CIE Frame Info:\n");
+ unw_debug("return Address register 0x%lx\n",
+ retAddrReg);
+ unw_debug("data Align: %ld\n", state.dataAlign);
+ unw_debug("code Align: %lu\n", state.codeAlign);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width !=
+ sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+
+ top = STACK_TOP_UNW(frame->task);
+ bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+#else
+ if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+#endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link, (unsigned long *)
+ (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+#else
+ && link > UNW_FP(frame) && link < bottom
+#endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET)))
+ {
+ UNW_SP(frame) =
+ UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+ -
+#else
+ +
+#endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+ unw_debug("\nProcess instructions\n");
+
+ /* process instructions
+	 * For ARC, we optimize by letting blink (retAddrReg) keep the DWARF
+	 * same-value rule in leaf functions, so we should not check
+ * state.regs[retAddrReg].where == Nowhere
+ */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+/* || state.regs[retAddrReg].where == Nowhere */
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("\n");
+
+ unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+ if (REG_INVALID(i))
+ continue;
+
+ switch (state.regs[i].where) {
+ case Nowhere:
+ break;
+ case Memory:
+ unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+ break;
+ case Register:
+ unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+ break;
+ case Value:
+ unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+ break;
+ }
+ }
+
+ unw_debug("\n");
+#endif
+
+ /* update frame */
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+ endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+
+ unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
+ state.cfa.reg, state.cfa.offs, cfa);
+
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width >
+ reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch (reg_info[state.regs[i].value].width) {
+ case sizeof(u8):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u8);
+ break;
+ case sizeof(u16):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u16);
+ break;
+ case sizeof(u32):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u32);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u64);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+
+ unw_debug("\nRegister state after evaluation with realtime Stack:\n");
+ fptr = (unsigned long *)(&frame->regs);
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ FRAME_REG(i, u8) = state.regs[i].value;
+ break;
+ case sizeof(u16):
+ FRAME_REG(i, u16) = state.regs[i].value;
+ break;
+ case sizeof(u32):
+ FRAME_REG(i, u32) = state.regs[i].value;
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ FRAME_REG(i, u64) = state.regs[i].value;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory:
+ addr = cfa + state.regs[i].value * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ __get_user(FRAME_REG(i, u8),
+ (u8 __user *)addr);
+ break;
+ case sizeof(u16):
+ __get_user(FRAME_REG(i, u16),
+ (u16 __user *)addr);
+ break;
+ case sizeof(u32):
+ __get_user(FRAME_REG(i, u32),
+ (u32 __user *)addr);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ __get_user(FRAME_REG(i, u64),
+ (u64 __user *)addr);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ break;
+ }
+ unw_debug("r%d: 0x%lx ", i, *fptr);
+ }
+
+ return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 0000000000..549c3f4079
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(res_service)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
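+
+/*
+ * jiffies aliases the low 32 bits of jiffies_64, which live at byte offset 4
+ * in a big-endian layout and at offset 0 in a little-endian one
+ */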
+
+SECTIONS
+{
+ /*
+	 * ICCM starts at 0x8000_0000. So if the kernel is relocated to some
+	 * other address, make sure no peripheral mapped in the 0x8xxx_xxxx
+	 * range clashes with ICCM. Essentially the vector table is also in ICCM.
+ */
+
+ . = CONFIG_LINUX_LINK_BASE;
+
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
+ . = ALIGN(PAGE_SIZE);
+ }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ .text.arcfp : {
+ *(.text.arcfp)
+ . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+ }
+#endif
+
+ /*
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
+	 *
+	 * The reason for having .init.ramfs above .init is to make sure that the
+ * binary blob is tucked away to one side, reducing the displacement
+ * between .init.text and .text, avoiding any possible relocation
+ * errors because of calls from .init.text to .text
+ * Yes such calls do exist. e.g.
+ * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+ */
+
+ __init_begin = .;
+
+ .init.ramfs : { INIT_RAM_FS }
+
+ . = ALIGN(PAGE_SIZE);
+
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+ /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(L1_CACHE_BYTES)
+ INIT_CALLS
+ CON_INITCALL
+ }
+
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
+
+ /*
+ * 1. this is .data essentially
+ * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+ */
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ _edata = .;
+
+ BSS_SECTION(4, 4, 4)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ . = ALIGN(PAGE_SIZE);
+ .eh_frame : {
+ __start_unwind = .;
+ *(.eh_frame)
+ __end_unwind = .;
+ }
+#else
+ /DISCARD/ : { *(.eh_frame) }
+#endif
+
+ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ STABS_DEBUG
+ ELF_DETAILS
+ DISCARDS
+
+ .arcextmap 0 : {
+ *(.gnu.linkonce.arcextmap.*)
+ *(.arcextmap.*)
+ }
+
+#ifndef CONFIG_DEBUG_INFO
+ /DISCARD/ : { *(.debug_frame) }
+ /DISCARD/ : { *(.debug_aranges) }
+ /DISCARD/ : { *(.debug_pubnames) }
+ /DISCARD/ : { *(.debug_info) }
+ /DISCARD/ : { *(.debug_abbrev) }
+ /DISCARD/ : { *(.debug_line) }
+ /DISCARD/ : { *(.debug_str) }
+ /DISCARD/ : { *(.debug_loc) }
+ /DISCARD/ : { *(.debug_macinfo) }
+ /DISCARD/ : { *(.debug_ranges) }
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ . = CONFIG_ARC_DCCM_BASE;
+ __arc_dccm_base = .;
+ .data.arcfp : {
+ *(.data.arcfp)
+ }
+ . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 0000000000..30158ae69f
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o
+
+lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o
+lib-$(CONFIG_ISA_ARCV2) += memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2) += memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2) += memcpy-archs.o
+endif
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 0000000000..d6dc5e9bc4
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* BIG ENDIAN */
+#define WORD2 r3
+#define SHIFT r2
+#endif
+
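+; Dispatch: or/asl_s folds the low two address bits of both pointers to the
+; top of r12, so the unsigned brls falls back to the bytewise loop if either
+; pointer is unaligned; the aligned path compares a word at a time under a
+; zero-overhead loop.
+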
+ENTRY_CFI(memcmp)
+ or r12,r0,r1
+ asl_s r12,r12,30
+ sub r3,r2,1
+ brls r2,r12,.Lbytewise
+ ld r4,[r0,0]
+ ld r5,[r1,0]
+ lsr.f lp_count,r3,3
+#ifdef CONFIG_ISA_ARCV2
+ /* In ARCv2 a branch can't be the last instruction in a zero overhead
+ * loop.
+ * So we move the branch to the start of the loop, duplicate it
+ * after the end, and set up r12 so that the branch isn't taken
+ * initially.
+ */
+ mov_s r12,WORD2
+ lpne .Loop_end
+ brne WORD2,r12,.Lodd
+ ld WORD2,[r0,4]
+#else
+ lpne .Loop_end
+ ld_s WORD2,[r0,4]
+#endif
+ ld_s r12,[r1,4]
+ brne r4,r5,.Leven
+ ld.a r4,[r0,8]
+ ld.a r5,[r1,8]
+#ifdef CONFIG_ISA_ARCV2
+.Loop_end:
+ brne WORD2,r12,.Lodd
+#else
+ brne WORD2,r12,.Lodd
+.Loop_end:
+#endif
+ asl_s SHIFT,SHIFT,3
+ bhs_s .Last_cmp
+ brne r4,r5,.Leven
+ ld r4,[r0,4]
+ ld r5,[r1,4]
+#ifdef __LITTLE_ENDIAN__
+ nop_s
+ ; one more load latency cycle
+.Last_cmp:
+ xor r0,r4,r5
+ bset r0,r0,SHIFT
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ b.d .Leven_cmp
+ and r1,r1,24
+.Leven:
+ xor r0,r4,r5
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+.Leven_cmp:
+ asl r2,r4,r1
+ asl r12,r5,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+ .balign 4
+.Lodd:
+ xor r0,WORD2,r12
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+ asl_s r2,r2,r1
+ asl_s r12,r12,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+#else /* BIG ENDIAN */
+.Last_cmp:
+ neg_s SHIFT,SHIFT
+ lsr r4,r4,SHIFT
+ lsr r5,r5,SHIFT
+ ; slow track insn
+.Leven:
+ sub.f r0,r4,r5
+ mov.ne r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+.Lodd:
+ cmp_s WORD2,r12
+ mov_s r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+#endif /* ENDIAN */
+ .balign 4
+.Lbytewise:
+ breq r2,0,.Lnil
+ ldb r4,[r0,0]
+ ldb r5,[r1,0]
+ lsr.f lp_count,r3
+#ifdef CONFIG_ISA_ARCV2
+ mov r12,r3
+ lpne .Lbyte_end
+ brne r3,r12,.Lbyte_odd
+#else
+ lpne .Lbyte_end
+#endif
+ ldb_s r3,[r0,1]
+ ldb r12,[r1,1]
+ brne r4,r5,.Lbyte_even
+ ldb.a r4,[r0,2]
+ ldb.a r5,[r1,2]
+#ifdef CONFIG_ISA_ARCV2
+.Lbyte_end:
+ brne r3,r12,.Lbyte_odd
+#else
+ brne r3,r12,.Lbyte_odd
+.Lbyte_end:
+#endif
+ bcc .Lbyte_even
+ brne r4,r5,.Lbyte_even
+ ldb_s r3,[r0,1]
+ ldb_s r12,[r1,1]
+.Lbyte_odd:
+ j_s.d [blink]
+ sub r0,r3,r12
+.Lbyte_even:
+ j_s.d [blink]
+ sub r0,r4,r5
+.Lnil:
+ j_s.d [blink]
+ mov r0,0
+END_CFI(memcmp)
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 0000000000..f2e239e219
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
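+; Same dispatch trick as memcmp: fold the low address bits of src and dst to
+; the top of r3, so the unsigned brls picks the bytewise copy when either
+; pointer is unaligned, else copy a word at a time under a zero-overhead loop.
+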
+ENTRY_CFI(memcpy)
+ or r3,r0,r1
+ asl_s r3,r3,30
+ mov_s r5,r0
+ brls.d r2,r3,.Lcopy_bytewise
+ sub.f r3,r2,1
+ ld_s r12,[r1,0]
+ asr.f lp_count,r3,3
+ bbit0.d r3,2,.Lnox4
+ bmsk_s r2,r2,1
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,4]
+.Lnox4:
+ lppnz .Lendloop
+ ld_s r3,[r1,4]
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,8]
+ st.ab r3,[r5,4]
+.Lendloop:
+ breq r2,0,.Last_store
+ ld r3,[r5,0]
+#ifdef __LITTLE_ENDIAN__
+ add3 r2,-1,r2
+ ; uses long immediate
+ xor_s r12,r12,r3
+ bmsk r12,r12,r2
+ xor_s r12,r12,r3
+#else /* BIG ENDIAN */
+ sub3 r2,31,r2
+ ; uses long immediate
+ xor_s r3,r3,r12
+ bmsk r3,r3,r2
+ xor_s r12,r12,r3
+#endif /* ENDIAN */
+.Last_store:
+ j_s.d [blink]
+ st r12,[r5,0]
+
+ .balign 4
+.Lcopy_bytewise:
+ jcs [blink]
+ ldb_s r12,[r1,0]
+ lsr.f lp_count,r3
+ bhs_s .Lnox1
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,1]
+.Lnox1:
+ lppnz .Lendbloop
+ ldb_s r3,[r1,1]
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,2]
+ stb.ab r3,[r5,1]
+.Lendbloop:
+ j_s.d [blink]
+ stb r12,[r5,0]
+END_CFI(memcpy)
diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S
new file mode 100644
index 0000000000..28993a73fd
--- /dev/null
+++ b/arch/arc/lib/memcpy-archs-unaligned.S
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ARCv2 memcpy implementation optimized for CPUs with unaligned memory
+ * access support.
+ *
+ * Copyright (C) 2019 Synopsys
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
+# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
+# define ZOLSHFT 5
+# define ZOLAND 0x1F
+#else
+# define LOADX(DST,RX) ld.ab DST, [RX, 4]
+# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
+# define ZOLSHFT 4
+# define ZOLAND 0xF
+#endif
+
+ENTRY_CFI(memcpy)
+	mov	r3, r0		; don't clobber ret val
+
+ lsr.f lp_count, r2, ZOLSHFT
+ lpnz @.Lcopy32_64bytes
+ ;; LOOP START
+ LOADX (r6, r1)
+ LOADX (r8, r1)
+ LOADX (r10, r1)
+ LOADX (r4, r1)
+ STOREX (r6, r3)
+ STOREX (r8, r3)
+ STOREX (r10, r3)
+ STOREX (r4, r3)
+.Lcopy32_64bytes:
+
+	and.f	lp_count, r2, ZOLAND	;Last remaining bytes (r2 & ZOLAND)
+ lpnz @.Lcopyremainingbytes
+ ;; LOOP START
+ ldb.ab r5, [r1, 1]
+ stb.ab r5, [r3, 1]
+.Lcopyremainingbytes:
+
+ j [blink]
+END_CFI(memcpy)
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
new file mode 100644
index 0000000000..0051a84f60
--- /dev/null
+++ b/arch/arc/lib/memcpy-archs.S
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
+# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
+# define MERGE_2(RX,RY,IMM)
+# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
+# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
+#else
+# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
+# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
+# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
+#endif
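+
+;; With an unaligned source each destination word is stitched from two
+;; adjacent source words: SHIFT_1/SHIFT_2 move the byte lanes into place and
+;; the halves are or-ed together (see the .Lcopy8bytes_* loops below).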
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
+# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
+# define ZOLSHFT 5
+# define ZOLAND 0x1F
+#else
+# define LOADX(DST,RX) ld.ab DST, [RX, 4]
+# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
+# define ZOLSHFT 4
+# define ZOLAND 0xF
+#endif
+
+ENTRY_CFI(memcpy)
+ mov.f 0, r2
+;;; if size is zero
+ jz.d [blink]
+ mov r3, r0 ; don't clobber ret val
+
+;;; if size <= 8
+ cmp r2, 8
+ bls.d @.Lsmallchunk
+ mov.f lp_count, r2
+
+ and.f r4, r0, 0x03
+ rsub lp_count, r4, 4
+ lpnz @.Laligndestination
+ ;; LOOP BEGIN
+ ldb.ab r5, [r1,1]
+ sub r2, r2, 1
+ stb.ab r5, [r3,1]
+.Laligndestination:
+
+;;; Check the alignment of the source
+ and.f r4, r1, 0x03
+ bnz.d @.Lsourceunaligned
+
+;;; CASE 0: Both source and destination are 32bit aligned
+;;; Convert len to Dwords, unfold x4
+ lsr.f lp_count, r2, ZOLSHFT
+ lpnz @.Lcopy32_64bytes
+ ;; LOOP START
+ LOADX (r6, r1)
+ LOADX (r8, r1)
+ LOADX (r10, r1)
+ LOADX (r4, r1)
+ STOREX (r6, r3)
+ STOREX (r8, r3)
+ STOREX (r10, r3)
+ STOREX (r4, r3)
+.Lcopy32_64bytes:
+
+ and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
+.Lsmallchunk:
+ lpnz @.Lcopyremainingbytes
+ ;; LOOP START
+ ldb.ab r5, [r1,1]
+ stb.ab r5, [r3,1]
+.Lcopyremainingbytes:
+
+ j [blink]
+;;; END CASE 0
+
+.Lsourceunaligned:
+ cmp r4, 2
+ beq.d @.LunalignedOffby2
+ sub r2, r2, 1
+
+ bhi.d @.LunalignedOffby3
+ ldb.ab r5, [r1, 1]
+
+;;; CASE 1: The source is unaligned, off by 1
+ ;; Hence we need to read 1 byte for 16bit alignment
+ ;; and 2 more bytes to reach 32bit alignment
+ ldh.ab r6, [r1, 2]
+ sub r2, r2, 2
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+ MERGE_1 (r6, r6, 8)
+ MERGE_2 (r5, r5, 24)
+ or r5, r5, r6
+
+ ;; Both src and dst are aligned
+ lpnz @.Lcopy8bytes_1
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ ld.ab r8, [r1,4]
+
+ SHIFT_1 (r7, r6, 24)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 8)
+
+ SHIFT_1 (r9, r8, 24)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 8)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_1:
+
+ ;; Write back the remaining 16bits
+ EXTRACT_1 (r6, r5, 16)
+ sth.ab r6, [r3, 2]
+ ;; Write back the remaining 8bits
+ EXTRACT_2 (r5, r5, 16)
+ stb.ab r5, [r3, 1]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_1
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_1:
+ j [blink]
+
+.LunalignedOffby2:
+;;; CASE 2: The source is unaligned, off by 2
+ ldh.ab r5, [r1, 2]
+ sub r2, r2, 1
+
+ ;; Both src and dst are aligned
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+ asl.nz r5, r5, 16
+#endif
+ lpnz @.Lcopy8bytes_2
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ ld.ab r8, [r1,4]
+
+ SHIFT_1 (r7, r6, 16)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 16)
+
+ SHIFT_1 (r9, r8, 16)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 16)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_2:
+
+#ifdef __BIG_ENDIAN__
+ lsr.nz r5, r5, 16
+#endif
+ sth.ab r5, [r3, 2]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_2
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_2:
+ j [blink]
+
+.LunalignedOffby3:
+;;; CASE 3: The source is unaligned, off by 3
+;;; Hence we need to read 1 byte to achieve 32bit alignment
+
+ ;; Both src and dst are aligned
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+ asl.ne r5, r5, 24
+#endif
+ lpnz @.Lcopy8bytes_3
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ ld.ab r8, [r1,4]
+
+ SHIFT_1 (r7, r6, 8)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 24)
+
+ SHIFT_1 (r9, r8, 8)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 24)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_3:
+
+#ifdef __BIG_ENDIAN__
+ lsr.nz r5, r5, 24
+#endif
+ stb.ab r5, [r3, 1]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_3
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_3:
+ j [blink]
+
+END_CFI(memcpy)
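All three unaligned cases above share one structure: whole aligned words are read from the source and each output word is assembled from the tail of the previous word plus the head of the current one, with SHIFT_1/SHIFT_2 choosing the shift directions per endianness; the only difference between cases 1/2/3 is the shift pair (24/8, 16/16, 8/24). A little-endian C sketch of the generic steady-state step, offered as illustration only (the asm specializes off and drains the leftover sub-word bytes afterwards):

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical sketch: dst is word aligned, the source was 'off'
	 * (1..3) bytes past word alignment, and 'carry' already holds the
	 * first (4 - off) source bytes in its low bits.
	 */
	static void copy_shift_merge(uint32_t *dst, const uint32_t *src_aligned,
				     unsigned int off, size_t nwords, uint32_t carry)
	{
		unsigned int lo = 8 * (4 - off);	/* SHIFT_1 amount */
		unsigned int hi = 8 * off;		/* SHIFT_2 amount */

		while (nwords--) {
			uint32_t cur = *src_aligned++;

			*dst++ = carry | (cur << lo);	/* SHIFT_1 + or */
			carry  = cur >> hi;		/* SHIFT_2      */
		}
	}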
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
new file mode 100644
index 0000000000..d0a5cec4cd
--- /dev/null
+++ b/arch/arc/lib/memset-archs.S
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+#include <asm/cache.h>
+
+/*
+ * The memset implementation below is optimized to use the prefetchw and
+ * prealloc instructions on CPUs with a 64B L1 data cache line
+ * (L1_CACHE_SHIFT == 6). If you want to implement an optimized memset for
+ * other possible L1 data cache line lengths (32B and 128B), rewrite the
+ * code carefully, checking that no prefetchw/prealloc instruction is issued
+ * for an L1 cache line that does not belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR reg, off
+ prealloc [\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR reg, off
+ prefetchw [\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR reg, off
+.endm
+
+.macro PREFETCHW_INSTR reg, off
+.endm
+
+#endif
+
+ENTRY_CFI(memset)
+ mov.f 0, r2
+;;; if size is zero
+ jz.d [blink]
+ mov r3, r0 ; don't clobber ret val
+
+ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
+
+;;; if length < 8
+ brls.d.nt r2, 8, .Lsmallchunk
+ mov.f lp_count,r2
+
+ and.f r4, r0, 0x03
+ rsub lp_count, r4, 4
+ lpnz @.Laligndestination
+ ;; LOOP BEGIN
+ stb.ab r1, [r3,1]
+ sub r2, r2, 1
+.Laligndestination:
+
+;;; Destination is aligned
+ and r1, r1, 0xFF
+ asl r4, r1, 8
+ or r4, r4, r1
+ asl r5, r4, 16
+ or r5, r5, r4
+ mov r4, r5
+
+ sub3 lp_count, r2, 8
+ cmp r2, 64
+ bmsk.hi r2, r2, 5
+ mov.ls lp_count, 0
+ add3.hi r2, r2, 8
+
+;;; Convert len to Dwords, unfold x8
+ lsr.f lp_count, lp_count, 6
+
+ lpnz @.Lset64bytes
+ ;; LOOP START
+ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
+
+#ifdef CONFIG_ARC_HAS_LL64
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
+.Lset64bytes:
+
+ lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
+ lpnz .Lset32bytes
+ ;; LOOP START
+#ifdef CONFIG_ARC_HAS_LL64
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
+.Lset32bytes:
+
+ and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
+.Lsmallchunk:
+ lpnz .Lcopy3bytes
+ ;; LOOP START
+ stb.ab r1, [r3, 1]
+.Lcopy3bytes:
+
+ j [blink]
+
+END_CFI(memset)
+
+ENTRY_CFI(memzero)
+ ; adjust bzero args to memset args
+ mov r2, r1
+ b.d memset ;tail call so no need to tinker with blink
+ mov r1, 0
+END_CFI(memzero)
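Two details above are worth unpacking. The fill byte is widened to a 32-bit pattern by two shift/or steps (held in the r4:r5 pair for the 64-bit std.ab stores), and the sub3/cmp/bmsk.hi/add3.hi sequence splits the length so the 64-byte loop always stops at least one cache line short of the buffer end, which is what keeps PREALLOC_INSTR r3, 64 from touching a line outside the memset area. A C sketch of both, with hypothetical helper names:

	#include <stdint.h>
	#include <stddef.h>

	/* Fill-pattern replication, as in the asl/or sequence above:
	 * 0x000000ab -> 0x0000abab -> 0xabababab.
	 */
	static uint32_t fill_word(unsigned char c)
	{
		uint32_t v = c;

		v |= v << 8;
		v |= v << 16;
		return v;
	}

	/* Length split done by sub3/bmsk.hi/add3.hi. Invariants:
	 * chunks64 * 64 + rem == len, and whenever len > 64 rem stays in
	 * 64..127, so 'prealloc [r3, 64]' in the main loop never reaches
	 * past the buffer.
	 */
	static void memset_split(size_t len, size_t *chunks64, size_t *rem)
	{
		if (len > 64) {
			*chunks64 = (len - 64) >> 6;
			*rem      = (len & 63) + 64;
		} else {
			*chunks64 = 0;
			*rem      = len;
		}
	}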
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 0000000000..9f35960da1
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
+#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
+
+ENTRY_CFI(memset)
+ mov_s r4,r0
+ or r12,r0,r2
+ bmsk.f r12,r12,1
+ extb_s r1,r1
+ asl r3,r1,8
+ beq.d .Laligned
+ or_s r1,r1,r3
+ brls r2,SMALL,.Ltiny
+ add r3,r2,r0
+ stb r1,[r3,-1]
+ bclr_s r3,r3,0
+ stw r1,[r3,-2]
+ bmsk.f r12,r0,1
+ add_s r2,r2,r12
+ sub.ne r2,r2,4
+ stb.ab r1,[r4,1]
+ and r4,r4,-2
+ stw.ab r1,[r4,2]
+ and r4,r4,-4
+.Laligned: ; This code address should be aligned for speed.
+ asl r3,r1,16
+ lsr.f lp_count,r2,2
+ or_s r1,r1,r3
+ lpne .Loop_end
+ st.ab r1,[r4,4]
+.Loop_end:
+ j_s [blink]
+
+ .balign 4
+.Ltiny:
+ mov.f lp_count,r2
+ lpne .Ltiny_end
+ stb.ab r1,[r4,1]
+.Ltiny_end:
+ j_s [blink]
+END_CFI(memset)
+
+; memzero: @r0 = mem, @r1 = size_t
+; memset: @r0 = mem, @r1 = char, @r2 = size_t
+
+ENTRY_CFI(memzero)
+ ; adjust bzero args to memset args
+ mov r2, r1
+ mov r1, 0
+ b memset ;tail call so no need to tinker with blink
+END_CFI(memzero)
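Both memzero trampolines (here and in memset-archs.S) only shuffle arguments into memset's convention; blink is left untouched, so memset returns directly to memzero's caller. In C terms the equivalent is simply:

	#include <stddef.h>
	#include <string.h>

	static inline void *memzero(void *p, size_t n)
	{
		return memset(p, 0, n);	/* mov r2,r1; mov r1,0; b memset */
	}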
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 0000000000..d52e2833f9
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/* ARC700 has a relatively long pipeline and branch prediction, so we want
+ to avoid branches that are hard to predict. On the other hand, the
+ presence of the norm instruction makes it easier to operate on whole
+ words branch-free. */
+
+#include <linux/linkage.h>
+
+ENTRY_CFI(strchr)
+ extb_s r1,r1
+ asl r5,r1,8
+ bmsk r2,r0,1
+ or r5,r5,r1
+ mov_s r3,0x01010101
+ breq.d r2,r0,.Laligned
+ asl r4,r5,16
+ sub_s r0,r0,r2
+ asl r7,r2,3
+ ld_s r2,[r0]
+#ifdef __LITTLE_ENDIAN__
+ asl r7,r3,r7
+#else
+ lsr r7,r3,r7
+#endif
+ or r5,r5,r4
+ ror r4,r3
+ sub r12,r2,r7
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0_ua
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r7
+ bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
+ and r7,r12,r4
+ breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
+ b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
+; /* We require this code address to be unaligned for speed... */
+.Laligned:
+ ld_s r2,[r0]
+ or r5,r5,r4
+ ror r4,r3
+; /* ... so that this code address is aligned, for itself and ... */
+.Loop:
+ sub r12,r2,r3
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r3
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop /* ... so that this branch is unaligned. */
+ ; Found searched-for character. r0 has already advanced to next word.
+#ifdef __LITTLE_ENDIAN__
+/* We only need the information about the first matching byte
+ (i.e. the least significant matching byte) to be exact,
+ hence there is no problem with carry effects. */
+.Lfound_char:
+ sub r3,r7,1
+ bic r3,r3,r7
+ norm r2,r3
+ sub_s r0,r0,1
+ asr_s r2,r2,3
+ j.d [blink]
+ sub_s r0,r0,r2
+
+ .balign 4
+.Lfound0_ua:
+ mov r3,r7
+.Lfound0:
+ sub r3,r6,r3
+ bic r3,r3,r6
+ and r2,r3,r4
+ or_s r12,r12,r2
+ sub_s r3,r12,1
+ bic_s r3,r3,r12
+ norm r3,r3
+ add_s r0,r0,3
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ sub_s r0,r0,r12
+ j_s.d [blink]
+ mov.pl r0,0
+#else /* BIG ENDIAN */
+.Lfound_char:
+ lsr r7,r7,7
+
+ bic r2,r7,r6
+.Lfound_char_b:
+ norm r2,r2
+ sub_s r0,r0,4
+ asr_s r2,r2,3
+ j.d [blink]
+ add_s r0,r0,r2
+
+.Lfound0_ua:
+ mov_s r3,r7
+.Lfound0:
+ asl_s r2,r2,7
+ or r7,r6,r4
+ bic_s r12,r12,r2
+ sub r2,r7,r3
+ or r2,r2,r6
+ bic r12,r2,r12
+ bic.f r3,r4,r12
+ norm r3,r3
+
+ add.pl r3,r3,1
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ add_s r0,r0,r12
+ j_s.d [blink]
+ mov.mi r0,0
+#endif /* ENDIAN */
+END_CFI(strchr)
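strchr, and the strcmp/strlen routines that follow, all rest on the same word-at-a-time test: with ones = 0x01010101 and highs = 0x80808080 (ones rotated right by 1, r4 above), the value (w - ones) & ~w & highs is non-zero exactly when some byte of w is zero, and a character search first xors w with the character replicated into every byte (r5 above). A C sketch with names of my choosing:

	#include <stdint.h>

	#define ONES  0x01010101u
	#define HIGHS 0x80808080u	/* ror(ONES), r4 in the asm */

	/* Non-zero iff some byte of w is zero. Bits above the first zero
	 * byte can be spuriously set for 0x01 bytes (borrow propagation),
	 * which is why only the least significant set bit is trusted above.
	 */
	static uint32_t has_zero_byte(uint32_t w)
	{
		return (w - ONES) & ~w & HIGHS;
	}

	/* Non-zero iff some byte of w equals c (r6 = w ^ r5 in the asm). */
	static uint32_t has_byte(uint32_t w, unsigned char c)
	{
		return has_zero_byte(w ^ (ONES * c));
	}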
diff --git a/arch/arc/lib/strcmp-archs.S b/arch/arc/lib/strcmp-archs.S
new file mode 100644
index 0000000000..7cffb37174
--- /dev/null
+++ b/arch/arc/lib/strcmp-archs.S
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
+ENTRY_CFI(strcmp)
+ or r2, r0, r1
+ bmsk_s r2, r2, 1
+ brne r2, 0, @.Lcharloop
+
+;;; s1 and s2 are word aligned
+ ld.ab r2, [r0, 4]
+
+ mov_s r12, 0x01010101
+ ror r11, r12
+ .align 4
+.LwordLoop:
+ ld.ab r3, [r1, 4]
+ ;; Detect NULL char in str1
+ sub r4, r2, r12
+ ld.ab r5, [r0, 4]
+ bic r4, r4, r2
+ and r4, r4, r11
+ brne.d.nt r4, 0, .LfoundNULL
+ ;; Check if the read locations are the same
+ cmp r2, r3
+ beq.d .LwordLoop
+ mov.eq r2, r5
+
+ ;; A match is found, spot it out
+#ifdef __LITTLE_ENDIAN__
+ swape r3, r3
+ mov_s r0, 1
+ swape r2, r2
+#else
+ mov_s r0, 1
+#endif
+ cmp_s r2, r3
+ j_s.d [blink]
+ bset.lo r0, r0, 31
+
+ .align 4
+.LfoundNULL:
+#ifdef __BIG_ENDIAN__
+ swape r4, r4
+ swape r2, r2
+ swape r3, r3
+#endif
+ ;; Find null byte
+ ffs r0, r4
+ bmsk r2, r2, r0
+ bmsk r3, r3, r0
+ swape r2, r2
+ swape r3, r3
+ ;; make the return value
+ sub.f r0, r2, r3
+ mov.hi r0, 1
+ j_s.d [blink]
+ bset.lo r0, r0, 31
+
+ .align 4
+.Lcharloop:
+ ldb.ab r2, [r0, 1]
+ ldb.ab r3, [r1, 1]
+ nop
+ breq r2, 0, .Lcmpend
+ breq r2, r3, .Lcharloop
+
+ .align 4
+.Lcmpend:
+ j_s.d [blink]
+ sub r0, r2, r3
+END_CFI(strcmp)
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 0000000000..b20c98fb3b
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/* This is optimized primarily for the ARC700.
+ It would be possible to speed up the loops by one cycle / word
+ respectively one cycle / byte by forcing doubleword source 1 alignment, unrolling
+ by a factor of two, and speculatively loading the second word / byte of
+ source 1; however, that would increase the overhead for loop setup / finish,
+ and strcmp might often terminate early. */
+
+#include <linux/linkage.h>
+
+ENTRY_CFI(strcmp)
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne r2,0,.Lcharloop
+ mov_s r12,0x01010101
+ ror r5,r12
+.Lwordloop:
+ ld.ab r2,[r0,4]
+ ld.ab r3,[r1,4]
+ nop_s
+ sub r4,r2,r12
+ bic r4,r4,r2
+ and r4,r4,r5
+ brne r4,0,.Lfound0
+ breq r2,r3,.Lwordloop
+#ifdef __LITTLE_ENDIAN__
+ xor r0,r2,r3 ; mask for difference
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+#endif /* LITTLE ENDIAN */
+ cmp_s r2,r3
+ mov_s r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+
+ .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+ xor r0,r2,r3 ; mask for difference
+ or r0,r0,r4 ; or in zero indicator
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+ sub.f r0,r2,r3
+ mov.hi r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#else /* BIG ENDIAN */
+ /* The zero-detection above can mis-detect 0x01 bytes as zeroes
+ because of carry propagation from a less significant zero byte.
+ We can compensate for this by checking that bit0 is zero.
+ This compensation is not necessary in the step where we
+ get a low estimate for r2, because in any affected bytes
+ we already have 0x00 or 0x01, which will remain unchanged
+ when bit 7 is cleared. */
+ .balign 4
+.Lfound0:
+ lsr r0,r4,8
+ lsr_s r1,r2
+ bic_s r2,r2,r0 ; get low estimate for r2 and get ...
+ bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
+ or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
+ cmp_s r3,r2 ; ... be independent of trailing garbage
+ or_s r2,r2,r0 ; likewise for r3 > r2
+ bic_s r3,r3,r0
+ rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
+ cmp_s r2,r3
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#endif /* ENDIAN */
+
+ .balign 4
+.Lcharloop:
+ ldb.ab r2,[r0,1]
+ ldb.ab r3,[r1,1]
+ nop_s
+ breq r2,0,.Lcmpend
+ breq r2,r3,.Lcharloop
+.Lcmpend:
+ j_s.d [blink]
+ sub r0,r2,r3
+END_CFI(strcmp)
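On the little-endian paths above the result is derived without re-reading bytes: xor the two words, isolate the lowest set bit with the sub-1/bic pair, then highs ^ (highs - bit) widens that bit into a mask covering the rest of its byte while zeroing every higher byte; masking both words and comparing unsigned then orders the strings by their first differing byte. A C sketch, assuming the two words are already known to differ (the asm encodes "less than" by setting bit 31 of a +1 result via bset.lo rather than returning -1):

	#include <stdint.h>

	#define HIGHS 0x80808080u	/* r5 = ror(0x01010101) in the asm */

	/* Hypothetical little-endian sketch; a and b must differ. Bits
	 * below the lowest differing bit are equal in both words, so
	 * masking them away cannot change the byte comparison.
	 */
	static int strcmp_word_diff(uint32_t a, uint32_t b)
	{
		uint32_t diff = a ^ b;
		uint32_t bit  = diff & ~(diff - 1);	/* lowest set bit     */
		uint32_t mask = HIGHS ^ (HIGHS - bit);	/* rest of that byte  */

		return (a & mask) < (b & mask) ? -1 : 1;
	}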
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 0000000000..6e2294d13e
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
+ If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+ it 8 byte aligned. Thus, we can do a little read-ahead, without
+ dereferencing a cache line that we should not touch.
+ Note that short and long instructions have been scheduled to avoid
+ branch stalls.
+ The beq_s to r3z could be made unaligned & long to avoid a stall
+ there, but it is not likely to be taken often, and it
+ would also be likely to cost an unaligned mispredict at the next call. */
+
+#include <linux/linkage.h>
+
+ENTRY_CFI(strcpy)
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne.d r2,0,charloop
+ mov_s r10,r0
+ ld_s r3,[r1,0]
+ mov r8,0x01010101
+ bbit0.d r1,2,loop_start
+ ror r12,r8
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne r3z
+ mov_s r4,r3
+ .balign 4
+loop:
+ ld.a r3,[r1,4]
+ st.ab r4,[r10,4]
+loop_start:
+ ld.a r4,[r1,4]
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne_s r3z
+ st.ab r3,[r10,4]
+ sub r2,r4,r8
+ bic r2,r2,r4
+ tst r2,r12
+ beq loop
+ mov_s r3,r4
+#ifdef __LITTLE_ENDIAN__
+r3z: bmsk.f r1,r3,7
+ lsr_s r3,r3,8
+#else
+r3z: lsr.f r1,r3,24
+ asl_s r3,r3,8
+#endif
+ bne.d r3z
+ stb.ab r1,[r10,1]
+ j_s [blink]
+
+ .balign 4
+charloop:
+ ldb.ab r3,[r1,1]
+
+ brne.d r3,0,charloop
+ stb.ab r3,[r10,1]
+ j [blink]
+END_CFI(strcpy)
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 0000000000..dae428ceb8
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/linkage.h>
+
+ENTRY_CFI(strlen)
+ or r3,r0,7
+ ld r2,[r3,-7]
+ ld.a r6,[r3,-3]
+ mov r4,0x01010101
+ ; uses long immediate
+#ifdef __LITTLE_ENDIAN__
+ asl_s r1,r0,3
+ btst_s r0,2
+ asl r7,r4,r1
+ ror r5,r4
+ sub r1,r2,r7
+ bic_s r1,r1,r2
+ mov.eq r7,r4
+ sub r12,r6,r7
+ bic r12,r12,r6
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#else /* BIG ENDIAN */
+ ror r5,r4
+ btst_s r0,2
+ mov_s r1,31
+ sub3 r7,r1,r0
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ bmsk r1,r1,r7
+ sub r12,r6,r4
+ bic r12,r12,r6
+ bmsk.ne r12,r12,r7
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#endif /* ENDIAN */
+
+.Loop:
+ ld_s r2,[r3,4]
+ ld.a r6,[r3,8]
+ ; stall for load result
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ sub r12,r6,r4
+ bic r12,r12,r6
+ or r12,r12,r1
+ and r12,r12,r5
+ breq r12,0,.Loop
+.Lend:
+ and.f r1,r1,r5
+ sub.ne r3,r3,4
+ mov.eq r1,r12
+#ifdef __LITTLE_ENDIAN__
+ sub_s r2,r1,1
+ bic_s r2,r2,r1
+ norm r1,r2
+ sub_s r0,r0,3
+ lsr_s r1,r1,3
+ sub r0,r3,r0
+ j_s.d [blink]
+ sub r0,r0,r1
+#else /* BIG ENDIAN */
+ lsr_s r1,r1,7
+ mov.eq r2,r6
+ bic_s r1,r1,r2
+ norm r1,r1
+ sub r0,r3,r0
+ lsr_s r1,r1,3
+ j_s.d [blink]
+ add r0,r0,r1
+#endif /* ENDIAN */
+.Learly_end:
+ b.d .Lend
+ sub_s.ne r1,r1,r1
+END_CFI(strlen)
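The word loop finds the NUL with the same (w - ones) & ~w & highs test used in strchr, then norm turns the surviving bit into a byte index. A portable little-endian C sketch using a compiler builtin in place of norm, and assuming an aligned start (the real routine additionally masks off bytes before the given pointer):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	#define ONES  0x01010101u
	#define HIGHS 0x80808080u

	static size_t strlen_words(const char *s)
	{
		const char *p = s;	/* assumed 4-byte aligned here */
		uint32_t w, z;

		for (;;) {
			memcpy(&w, p, 4);	/* may read past the NUL, but
						 * stays within the word */
			z = (w - ONES) & ~w & HIGHS;
			if (z)	/* lowest set bit is the exact NUL position */
				return (size_t)(p - s) + (__builtin_ctz(z) >> 3);
			p += 4;
		}
	}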
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 0000000000..633a773369
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-y := extable.o ioremap.o dma.o fault.o init.o
+obj-y += tlb.o tlbex.o cache.o mmap.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
new file mode 100644
index 0000000000..f7e05c1466
--- /dev/null
+++ b/arch/arc/mm/cache.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC Cache Management
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_ISA_ARCV2
+#define USE_RGN_FLSH 1
+#endif
+
+static int l2_line_sz;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
+unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
+unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
+
+static struct cpuinfo_arc_cache {
+ unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
+void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page);
+
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
+
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_cache *p_slc = &slc_info;
+ struct bcr_identity ident;
+ struct bcr_generic sbcr;
+ struct bcr_clust_cfg cbcr;
+ struct bcr_volatile vol;
+ int n = 0;
+
+ READ_BCR(ARC_REG_SLC_BCR, sbcr);
+ if (sbcr.ver) {
+ struct bcr_slc_cfg slc_cfg;
+ READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
+ p_slc->sz_k = 128 << slc_cfg.sz;
+ l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+ n += scnprintf(buf + n, len - n,
+ "SLC\t\t: %uK, %uB Line%s\n",
+ p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
+ }
+
+ READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
+ if (cbcr.c) {
+ ioc_exists = 1;
+
+ /*
+ * As of today we don't support both IOC and ZONE_HIGHMEM enabled
+ * simultaneously, because the IOC aperture covers
+ * only ZONE_NORMAL (low mem) and any dma transactions outside this
+ * region won't be HW coherent.
+ * If we want to use both IOC and ZONE_HIGHMEM we can use
+ * bounce_buffer to handle dma transactions to HIGHMEM.
+ * Also it is possible to modify dma_direct cache ops or increase IOC
+ * aperture size if we are planning to use HIGHMEM without PAE.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+ ioc_enable = 0;
+ } else {
+ ioc_enable = 0;
+ }
+
+ READ_BCR(AUX_IDENTITY, ident);
+
+ /* HS 2.0 didn't have AUX_VOL */
+ if (ident.family > 0x51) {
+ READ_BCR(AUX_VOL, vol);
+ perip_base = vol.start << 28;
+ /* HS 3.0 has limit and strict-ordering fields */
+ if (ident.family > 0x52)
+ perip_end = (vol.limit << 28) - 1;
+ }
+
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+ return n;
+}
+
+int arc_cache_mumbojumbo(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+ struct bcr_cache ibcr, dbcr;
+ int vipt, assoc;
+ int n = 0;
+
+ READ_BCR(ARC_REG_IC_BCR, ibcr);
+ if (!ibcr.ver)
+ goto dc_chk;
+
+ if (is_isa_arcompact() && (ibcr.ver <= 3)) {
+ BUG_ON(ibcr.config != 3);
+ assoc = 2; /* Fixed to 2w set assoc */
+ } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+ assoc = 1 << ibcr.config; /* 1,2,4,8 */
+ }
+
+ p_ic->line_len = 8 << ibcr.line_len;
+ p_ic->sz_k = 1 << (ibcr.sz - 1);
+ p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
+
+ n += scnprintf(buf + n, len - n,
+ "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+ p_ic->sz_k, assoc, p_ic->line_len,
+ p_ic->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
+
+dc_chk:
+ READ_BCR(ARC_REG_DC_BCR, dbcr);
+ if (!dbcr.ver)
+ goto slc_chk;
+
+ if (is_isa_arcompact() && (dbcr.ver <= 3)) {
+ BUG_ON(dbcr.config != 2);
+ vipt = 1;
+ assoc = 4; /* Fixed to 4w set assoc */
+ p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+ } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+ vipt = 0;
+ assoc = 1 << dbcr.config; /* 1,2,4,8 */
+ p_dc->colors = 1; /* PIPT so can't VIPT alias */
+ }
+
+ p_dc->line_len = 16 << dbcr.line_len;
+ p_dc->sz_k = 1 << (dbcr.sz - 1);
+
+ n += scnprintf(buf + n, len - n,
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+ p_dc->sz_k, assoc, p_dc->line_len,
+ vipt ? "VIPT" : "PIPT",
+ p_dc->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
+slc_chk:
+ if (is_isa_arcv2())
+ n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+ return n;
+}
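The colors value computed above is what gates all the aliasing handling later: it is the number of page-sized slices in one cache way, so anything above 1 means two virtual mappings of the same physical page can land in different cache sets. A short sketch with hypothetical geometry:

	/* colors = way size / page size, as computed above. For example, a
	 * hypothetical 32K 2-way VIPT I-cache with 8K pages gives
	 *	way size = 32K / 2 = 16K, colors = 16K / 8K = 2 -> can alias,
	 * while a 4-way cache of the same size gives colors == 1 (no alias).
	 */
	static unsigned int cache_colors(unsigned int sz_k, unsigned int assoc,
					 unsigned int page_k)
	{
		return sz_k / assoc / page_k;
	}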
+
+/*
+ * Line Operation on {I,D}-Cache
+ */
+
+#define OP_INV 0x1
+#define OP_FLUSH 0x2
+#define OP_FLUSH_N_INV 0x3
+#define OP_INV_IC 0x4
+
+/*
+ * Cache Flush programming model
+ *
+ * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
+ * Programming model requires both paddr and vaddr irrespective of aliasing
+ * considerations:
+ * - vaddr in {I,D}C_IV?L
+ * - paddr in {I,D}C_PTAG
+ *
+ * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
+ * Programming model is different for aliasing vs. non-aliasing I$
+ * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
+ * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
+ *
+ * - If PAE40 is enabled, independent of aliasing considerations, the higher
+ * bits need to be written into PTAG_HI
+ */
+
+static inline
+void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int aux_cmd, aux_tag;
+ int num_lines;
+
+ if (op == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
+ aux_tag = ARC_REG_IC_PTAG;
+ } else {
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ aux_tag = ARC_REG_DC_PTAG;
+ }
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @paddr - aligned to cache line and integral @num_lines.
+ * This however can be avoided for page sized since:
+ * -@paddr will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!full_page) {
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+ vaddr &= CACHE_LINE_MASK;
+ }
+ num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+ /*
+ * MMUv3, cache ops require paddr in PTAG reg
+ * if V-P const for loop, PTAG can be written once outside loop
+ */
+ if (full_page)
+ write_aux_reg(aux_tag, paddr);
+
+ /*
+ * This is technically for MMU v4, using the MMU v3 programming model
+ * Special work for HS38 aliasing I-cache configuration with PAE40
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ * Note that PTAG_HI is hoisted outside the line loop
+ */
+ if (is_pae40_enabled() && op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+
+ while (num_lines-- > 0) {
+ if (!full_page) {
+ write_aux_reg(aux_tag, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+
+ write_aux_reg(aux_cmd, vaddr);
+ vaddr += L1_CACHE_BYTES;
+ }
+}
+
+#ifndef USE_RGN_FLSH
+
+/*
+ * Per-line loop for ARCv2 (MMUv4): D$ is PIPT and the non-aliasing I$ only
+ * needs paddr; the aliasing I$ case is routed to __cache_line_loop_v3().
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int aux_cmd;
+ int num_lines;
+
+ if (op == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
+ } else {
+ /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ }
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @paddr - aligned to cache line and integral @num_lines.
+ * This however can be avoided for page sized since:
+ * -@paddr will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!full_page) {
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+ /*
+ * For HS38 PAE40 configuration
+ * - upper 8 bits of paddr need to be written into PTAG_HI
+ * - (and needs to be written before the lower 32 bits)
+ */
+ if (is_pae40_enabled()) {
+ if (op == OP_INV_IC)
+ /*
+ * Non aliasing I-cache in HS38,
+ * aliasing I-cache handled in __cache_line_loop_v3()
+ */
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ while (num_lines-- > 0) {
+ write_aux_reg(aux_cmd, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+}
+
+#else
+
+/*
+ * optimized flush operation which takes a region as opposed to iterating per line
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int s, e;
+
+ /* Only for Non aliasing I-cache in HS38 */
+ if (op == OP_INV_IC) {
+ s = ARC_REG_IC_IVIR;
+ e = ARC_REG_IC_ENDR;
+ } else {
+ s = ARC_REG_DC_STARTR;
+ e = ARC_REG_DC_ENDR;
+ }
+
+ if (!full_page) {
+ /* for any leading gap between @paddr and start of cache line */
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+
+ /*
+ * account for any trailing gap to end of cache line
+ * this is equivalent to DIV_ROUND_UP() in line ops above
+ */
+ sz += L1_CACHE_BYTES - 1;
+ }
+
+ if (is_pae40_enabled()) {
+ /* TBD: check if crossing 4TB boundary */
+ if (op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ /* ENDR needs to be set ahead of START */
+ write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
+ write_aux_reg(s, paddr);
+
+ /* caller waits on DC_CTRL.FS */
+}
+
+#endif
+
+#ifdef CONFIG_ARC_MMU_V3
+#define __cache_line_loop __cache_line_loop_v3
+#else
+#define __cache_line_loop __cache_line_loop_v4
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+#ifndef USE_RGN_FLSH
+/*
+ * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
+ * in the non region flush regime (such as for ARCompact)
+ */
+static inline void __before_dc_op(const int op)
+{
+ if (op == OP_FLUSH_N_INV) {
+ /* Dcache provides 2 cmd: FLUSH or INV
+ * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * So toggle INV sub-mode depending on op request and default
+ */
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
+ }
+}
+
+#else
+
+static inline void __before_dc_op(const int op)
+{
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int val = read_aux_reg(ctl);
+
+ if (op == OP_FLUSH_N_INV) {
+ val |= DC_CTRL_INV_MODE_FLUSH;
+ }
+
+ if (op != OP_INV_IC) {
+ /*
+ * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
+ * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
+ */
+ val &= ~DC_CTRL_RGN_OP_MSK;
+ if (op & OP_INV)
+ val |= DC_CTRL_RGN_OP_INV;
+ }
+ write_aux_reg(ctl, val);
+}
+
+#endif
+
+
+static inline void __after_dc_op(const int op)
+{
+ if (op & OP_FLUSH) {
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int reg;
+
+ /* flush / flush-n-inv both wait */
+ while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
+ ;
+
+ /* Switch back to default Invalidate mode */
+ if (op == OP_FLUSH_N_INV)
+ write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
+ }
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int op)
+{
+ int aux;
+
+ __before_dc_op(op);
+
+ if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDC;
+ else
+ aux = ARC_REG_DC_FLSH;
+
+ write_aux_reg(aux, 0x1);
+
+ __after_dc_op(op);
+}
+
+static inline void __dc_disable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ __dc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
+
+/*
+ * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op)
+{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ __before_dc_op(op);
+
+ __cache_line_loop(paddr, vaddr, sz, op, full_page);
+
+ __after_dc_op(op);
+
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
+#define __dc_line_op(paddr, vaddr, sz, op)
+#define __dc_line_op_k(paddr, sz, op)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+static inline void __ic_entire_inv(void)
+{
+ write_aux_reg(ARC_REG_IC_IVIC, 1);
+ read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
+}
+
+static inline void
+__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz)
+{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
+ local_irq_restore(flags);
+}
+
+#ifndef CONFIG_SMP
+
+#define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
+
+#else
+
+struct ic_inv_args {
+ phys_addr_t paddr, vaddr;
+ int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+ struct ic_inv_args *ic_inv = info;
+
+ __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz)
+{
+ struct ic_inv_args ic_inv = {
+ .paddr = paddr,
+ .vaddr = vaddr,
+ .sz = sz
+ };
+
+ on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
+
+#endif /* CONFIG_SMP */
+
+#else /* !CONFIG_ARC_HAS_ICACHE */
+
+#define __ic_entire_inv()
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+ /*
+ * SLC is shared between all cores and concurrent aux operations from
+ * multiple cores need to be serialized using a spinlock
+ * A concurrent operation can be silently ignored and/or the old/new
+ * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+ * below)
+ */
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
+ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+ /*
+ * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
+ * - b'000 (default) is Flush,
+ * - b'001 is Invalidate if CTRL.IM == 0
+ * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
+ */
+ ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+ /* Don't rely on default value of IM bit */
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ if (op & OP_INV)
+ ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */
+ else
+ ctrl &= ~SLC_CTRL_RGN_OP_INV;
+
+ write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+ /*
+ * Lower bits are ignored, no need to clip
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+ end = paddr + sz + l2_line_sz - 1;
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+ spin_unlock_irqrestore(&lock, flags);
+#endif
+}
+
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+ /*
+ * SLC is shared between all cores and concurrent aux operations from
+ * multiple cores need to be serialized using a spinlock
+ * A concurrent operation can be silently ignored and/or the old/new
+ * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+ * below)
+ */
+ static DEFINE_SPINLOCK(lock);
+
+ const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
+ unsigned int ctrl, cmd;
+ unsigned long flags;
+ int num_lines;
+
+ spin_lock_irqsave(&lock, flags);
+
+ ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+ /* Don't rely on default value of IM bit */
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+ cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
+
+ sz += paddr & ~SLC_LINE_MASK;
+ paddr &= SLC_LINE_MASK;
+
+ num_lines = DIV_ROUND_UP(sz, l2_line_sz);
+
+ while (num_lines-- > 0) {
+ write_aux_reg(cmd, paddr);
+ paddr += l2_line_sz;
+ }
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+ spin_unlock_irqrestore(&lock, flags);
+#endif
+}
+
+#define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
+
+noinline static void slc_entire_op(const int op)
+{
+ unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+ ctrl = read_aux_reg(r);
+
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(r, ctrl);
+
+ if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+ else
+ write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
+
+ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+ read_aux_reg(r);
+
+ /* Important to wait for flush to complete */
+ while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ slc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ * -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to K-mapping, former needs flushing.
+ */
+void flush_dcache_folio(struct folio *folio)
+{
+ struct address_space *mapping;
+
+ if (!cache_is_vipt_aliasing()) {
+ clear_bit(PG_dc_clean, &folio->flags);
+ return;
+ }
+
+ /* don't handle anon pages here */
+ mapping = folio_flush_mapping(folio);
+ if (!mapping)
+ return;
+
+ /*
+ * pagecache page, file not yet mapped to userspace
+ * Make a note that K-mapping is dirty
+ */
+ if (!mapping_mapped(mapping)) {
+ clear_bit(PG_dc_clean, &folio->flags);
+ } else if (folio_mapped(folio)) {
+ /* kernel reading from page with U-mapping */
+ phys_addr_t paddr = (unsigned long)folio_address(folio);
+ unsigned long vaddr = folio_pos(folio);
+
+ /*
+ * vaddr is not actually the virtual address, but is
+ * congruent to every user mapping.
+ */
+ if (addr_not_cache_congruent(paddr, vaddr))
+ __flush_dcache_pages(paddr, vaddr,
+ folio_nr_pages(folio));
+ }
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+ return flush_dcache_folio(page_folio(page));
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * DMA ops for systems with L1 cache only
+ * Make memory coherent with L1 cache by flushing/invalidating L1 lines
+ */
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+}
+
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH);
+}
+
+/*
+ * DMA ops for systems with both L1 and L2 caches, but without IOC
+ * Both L1 and L2 lines need to be explicitly flushed/invalidated
+ */
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+ slc_op(start, sz, OP_FLUSH_N_INV);
+}
+
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_INV);
+ slc_op(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
+{
+ __dc_line_op_k(start, sz, OP_FLUSH);
+ slc_op(start, sz, OP_FLUSH);
+}
+
+/*
+ * Exported DMA API
+ */
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_wback_inv(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_inv(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
+{
+ __dma_cache_wback(start, sz);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is API for making I/D Caches consistent when modifying
+ * kernel code (loadable modules, kprobes, kgdb...)
+ * This is called on insmod, with kernel virtual address for CODE of
+ * the module. ARC cache maintenance ops require PHY address thus we
+ * need to convert vmalloc addr to PHY addr
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+ unsigned int tot_sz;
+
+ WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
+
+ /* Shortcut for bigger flush ranges.
+ * Here we don't care if this was kernel virtual or phy addr
+ */
+ tot_sz = kend - kstart;
+ if (tot_sz > PAGE_SIZE) {
+ flush_cache_all();
+ return;
+ }
+
+ /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+ if (likely(kstart > PAGE_OFFSET)) {
+ /*
+ * The 2nd arg despite being paddr will be used to index icache
+ * This is OK since no alternate virtual mappings will exist
+ * given the callers for this case: kprobe/kgdb in built-in
+ * kernel code only.
+ */
+ __sync_icache_dcache(kstart, kstart, kend - kstart);
+ return;
+ }
+
+ /*
+ * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+ * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+ * handling of kernel vaddr.
+ *
+ * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+ * it still needs to handle a 2 page scenario, where the range
+ * straddles 2 virtual pages, hence the need for a loop
+ */
+ while (tot_sz > 0) {
+ unsigned int off, sz;
+ unsigned long phy, pfn;
+
+ off = kstart % PAGE_SIZE;
+ pfn = vmalloc_to_pfn((void *)kstart);
+ phy = (pfn << PAGE_SHIFT) + off;
+ sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+ __sync_icache_dcache(phy, kstart, sz);
+ kstart += sz;
+ tot_sz -= sz;
+ }
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * General purpose helper to make I and D cache lines consistent.
+ * @paddr is phy addr of region
+ * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
+ * However in one instance, when called by kprobe (for a breakpoint in
+ * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
+ * use a paddr to index the cache (despite VIPT). This is fine since a
+ * builtin kernel page will not have any virtual mappings.
+ * kprobe on loadable module will be kernel vaddr.
+ */
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
+{
+ __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
+ __ic_line_inv_vaddr(paddr, vaddr, len);
+}
+
+/* wrapper to compile time eliminate alignment checks in flush loop */
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
+{
+ __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
+}
+
+/*
+ * wrapper to clearout kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
+{
+ __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
+}
+
+noinline void flush_cache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ __ic_entire_inv();
+ __dc_entire_op(OP_FLUSH_N_INV);
+
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+ flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+ unsigned long pfn)
+{
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ u_vaddr &= PAGE_MASK;
+
+ __flush_dcache_pages(paddr, u_vaddr, 1);
+
+ if (vma->vm_flags & VM_EXEC)
+ __inv_icache_pages(paddr, u_vaddr, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_cache_all();
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long u_vaddr)
+{
+ /* TBD: do we really need to clear the kernel mapping */
+ __flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
+ __flush_dcache_pages((phys_addr_t)page_address(page),
+ (phys_addr_t)page_address(page), 1);
+}
+
+#endif
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+ struct folio *src = page_folio(from);
+ struct folio *dst = page_folio(to);
+ void *kfrom = kmap_atomic(from);
+ void *kto = kmap_atomic(to);
+ int clean_src_k_mappings = 0;
+
+ /*
+ * If the SRC page was already mapped in userspace AND its U-mapping is
+ * not congruent with the K-mapping, sync the former to the physical page
+ * so that the K-mapping in copy_page() below sees the right data
+ *
+ * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+ * equally valid for SRC page as well
+ *
+ * For !VIPT cache, all of this gets compiled out as
+ * addr_not_cache_congruent() is 0
+ */
+ if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+ __flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
+ clean_src_k_mappings = 1;
+ }
+
+ copy_page(kto, kfrom);
+
+ /*
+ * Mark DST page K-mapping as dirty for a later finalization by
+ * update_mmu_cache(). Although the finalization could have been done
+ * here as well (given that both vaddr/paddr are available).
+ * But update_mmu_cache() already has code to do that for other
+ * non copied user pages (e.g. read faults which wire in pagecache page
+ * directly).
+ */
+ clear_bit(PG_dc_clean, &dst->flags);
+
+ /*
+ * if SRC was already usermapped and non-congruent to kernel mapping
+ * sync the kernel mapping back to physical page
+ */
+ if (clean_src_k_mappings) {
+ __flush_dcache_pages((unsigned long)kfrom,
+ (unsigned long)kfrom, 1);
+ } else {
+ clear_bit(PG_dc_clean, &src->flags);
+ }
+
+ kunmap_atomic(kto);
+ kunmap_atomic(kfrom);
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ clear_page(to);
+ clear_bit(PG_dc_clean, &folio->flags);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+ /* TBD: optimize this */
+ flush_cache_all();
+ return 0;
+}
+
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ * Non-Masters need not be accessing caches at that time
+ * - They are either HALT_ON_RESET and kick started much later or
+ * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ * doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ * otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ * Coherency transactions
+ */
+static noinline void __init arc_ioc_setup(void)
+{
+ unsigned int ioc_base, mem_sz;
+
+ /*
+ * If IOC was already enabled (due to bootloader) it technically needs to
+ * be reconfigured with an aperture base/size corresponding to the Linux
+ * memory map, which will certainly differ from u-boot's. But disabling and
+ * reenabling IOC when DMA might be potentially active is tricky business.
+ * To avoid random memory issues later, just panic here and ask user to
+ * upgrade bootloader to one which doesn't enable IOC
+ */
+ if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
+ panic("IOC already enabled, please upgrade bootloader!\n");
+
+ if (!ioc_enable)
+ return;
+
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+ /* Flush + invalidate SLC */
+ if (read_aux_reg(ARC_REG_SLC_BCR))
+ slc_entire_op(OP_FLUSH_N_INV);
+
+ /*
+ * currently IOC Aperture covers entire DDR
+ * TBD: fix for PGU + 1GB of low mem
+ * TBD: fix for PAE
+ */
+ mem_sz = arc_get_mem_sz();
+
+ if (!is_power_of_2(mem_sz) || mem_sz < 4096)
+ panic("IOC Aperture size must be power of 2 larger than 4KB");
+
+ /*
+ * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
+ * so setting 0x11 implies 512MB, 0x12 implies 1GB...
+ */
+ write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
+
+ /* for now assume kernel base is start of IOC aperture */
+ ioc_base = CONFIG_LINUX_RAM_BASE;
+
+ if (ioc_base % mem_sz != 0)
+ panic("IOC Aperture start must be aligned to the size of the aperture");
+
+ write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
+
+ /* Re-enable L1 dcache */
+ __dc_enable();
+}
+
+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ * Assume SMP only, so all cores will have same cache config. A check on
+ * one core suffices for all
+ * - IOC setup / dma callbacks only need to be done once
+ */
+static noinline void __init arc_cache_init_master(void)
+{
+ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+ struct cpuinfo_arc_cache *ic = &ic_info;
+
+ if (!ic->line_len)
+ panic("cache support enabled but non-existent cache\n");
+
+ if (ic->line_len != L1_CACHE_BYTES)
+ panic("ICache line [%d] != kernel Config [%d]",
+ ic->line_len, L1_CACHE_BYTES);
+
+ /*
+ * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
+ * pair to provide vaddr/paddr respectively, just as in MMU v3
+ */
+ if (is_isa_arcv2() && ic->colors > 1)
+ _cache_line_loop_ic_fn = __cache_line_loop_v3;
+ else
+ _cache_line_loop_ic_fn = __cache_line_loop;
+ }
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
+ struct cpuinfo_arc_cache *dc = &dc_info;
+
+ if (!dc->line_len)
+ panic("cache support enabled but non-existent cache\n");
+
+ if (dc->line_len != L1_CACHE_BYTES)
+ panic("DCache line [%d] != kernel Config [%d]",
+ dc->line_len, L1_CACHE_BYTES);
+
+ /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
+ if (is_isa_arcompact()) {
+ int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+
+ if (dc->colors > 1) {
+ if (!handled)
+ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ if (CACHE_COLORS_NUM != dc->colors)
+ panic("CACHE_COLORS_NUM not optimized for config\n");
+ } else if (handled && dc->colors == 1) {
+ panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+ }
+ }
+ }
+
+ /*
+ * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+ * or equal to any cache line length.
+ */
+ BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+ "SMP_CACHE_BYTES must be >= any cache line length");
+ if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+ panic("L2 Cache line [%d] > kernel Config [%d]\n",
+ l2_line_sz, SMP_CACHE_BYTES);
+
+ /* Note that SLC disable not formally supported till HS 3.0 */
+ if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+ arc_slc_disable();
+
+ if (is_isa_arcv2() && ioc_exists)
+ arc_ioc_setup();
+
+ if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+ __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
+ __dma_cache_inv = __dma_cache_inv_slc;
+ __dma_cache_wback = __dma_cache_wback_slc;
+ } else {
+ __dma_cache_wback_inv = __dma_cache_wback_inv_l1;
+ __dma_cache_inv = __dma_cache_inv_l1;
+ __dma_cache_wback = __dma_cache_wback_l1;
+ }
+ /*
+ * In case of IOC (say IOC+SLC case), pointers above could still be set
+ * but end up not being relevant as the first function in chain is not
+ * called at all for devices using coherent DMA.
+ * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+ */
+}
+
+void __ref arc_cache_init(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
+
+ if (!cpu)
+ arc_cache_init_master();
+
+ /*
+ * In the PAE regime, TLB and cache maintenance ops take wider addresses,
+ * and even if PAE is not enabled in the kernel, the upper 32 bits still
+ * need to be zeroed to keep the ops sane.
+ * As an optimization for the more common !PAE case, zero them out once at
+ * init rather than checking/setting to 0 on every runtime op
+ */
+ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+ if (l2_line_sz) {
+ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+ }
+ }
+}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 0000000000..2a7fbbb83b
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/dma-map-ops.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops
+ * - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ * - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ /*
+ * Evict any existing L1 and/or L2 lines for the backing page
+ * in case it was used earlier as a normal "cached" page.
+ * Yeah this bit us - STAR 9000898266
+ *
+ * Although core does call flush_cache_vmap(), it gets kvaddr hence
+ * can't be used to efficiently flush L1 and/or L2 which need paddr
+ * Currently flush_cache_vmap nukes the L1 cache completely which
+ * will be optimized as a separate commit
+ */
+ dma_cache_wback_inv(page_to_phys(page), size);
+}
+
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ * | map == for_device | unmap == for_cpu
+ * |----------------------------------------------------------------
+ * TO_DEV | writeback writeback | none none
+ * FROM_DEV | invalidate invalidate | invalidate* invalidate*
+ * BIDIR | writeback+inv writeback+inv | invalidate invalidate
+ *
+ * [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+
+ /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv(paddr, size);
+ break;
+
+ default:
+ break;
+ }
+}
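Drivers never call these hooks directly; the generic dma-mapping core invokes them around streaming mappings. A hedged sketch of a receive path exercising the FROM_DEVICE rows of the table above (the buffer size and flow are made up; dma_map_single()/dma_unmap_single()/dma_mapping_error() are the standard kernel API):

	#include <linux/dma-mapping.h>

	#define RX_BUF_SZ 2048			/* hypothetical */

	static int rx_one_buffer(struct device *dev, void *buf)
	{
		dma_addr_t handle;

		/* map == for_device: arch_sync_dma_for_device() invalidates */
		handle = dma_map_single(dev, buf, RX_BUF_SZ, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... hand 'handle' to the device, wait for completion ... */

		/* unmap == for_cpu: invalidate again, in case the CPU
		 * speculatively fetched lines during the transfer */
		dma_unmap_single(dev, handle, RX_BUF_SZ, DMA_FROM_DEVICE);
		return 0;
	}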
+
+/*
+ * Plug in direct dma map ops.
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ /*
+ * IOC hardware snoops all DMA traffic keeping the caches consistent
+ * with memory - eliding need for any explicit cache maintenance of
+ * DMA buffers.
+ */
+ if (is_isa_arcv2() && ioc_enable && coherent)
+ dev->dma_coherent = true;
+
+ dev_info(dev, "use %scoherent DMA ops\n",
+ dev->dma_coherent ? "" : "non");
+}
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 0000000000..88fa3a4d49
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/export.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->ret = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 0000000000..95119a5e77
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/perf_event.h>
+#include <linux/mm_types.h>
+#include <asm/entry.h>
+#include <asm/mmu.h>
+
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+static noinline int handle_kernel_vaddr_fault(unsigned long address)
+{
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = pgd_offset(current->active_mm, address);
+ pgd_k = pgd_offset_k(address);
+
+	if (pgd_none(*pgd_k))
+ goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
+
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (p4d_none(*p4d_k))
+ goto bad_area;
+ if (!p4d_present(*p4d))
+ set_p4d(p4d, *p4d_k);
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (pmd_none(*pmd_k))
+ goto bad_area;
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
+
+ /* XXX: create the TLB entry here */
+ return 0;
+
+bad_area:
+ return 1;
+}
+
+void do_page_fault(unsigned long address, struct pt_regs *regs)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ int sig, si_code = SEGV_MAPERR;
+ unsigned int write = 0, exec = 0, mask;
+ vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */
+ unsigned int flags; /* handle_mm_fault() input */
+
+ /*
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= VMALLOC_START && !user_mode(regs)) {
+ if (unlikely(handle_kernel_vaddr_fault(address)))
+ goto no_context;
+ else
+ return;
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+	 * context, we must not take the fault.
+ */
+ if (faulthandler_disabled() || !mm)
+ goto no_context;
+
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
+ write = 1;
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
+ exec = 1;
+
+ flags = FAULT_FLAG_DEFAULT;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (write)
+ flags |= FAULT_FLAG_WRITE;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (!vma)
+ goto bad_area_nosemaphore;
+
+ /*
+ * vm_area is good, now check permissions for this memory access
+ */
+ mask = VM_READ;
+ if (write)
+ mask = VM_WRITE;
+ if (exec)
+ mask = VM_EXEC;
+
+ if (!(vma->vm_flags & mask)) {
+ si_code = SEGV_ACCERR;
+ goto bad_area;
+ }
+
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ /*
+ * Fault retry nuances, mmap_lock already relinquished by core mm
+ */
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+
+bad_area:
+ mmap_read_unlock(mm);
+
+bad_area_nosemaphore:
+ /*
+ * Major/minor page fault accounting
+ * (in case of retry we only land here once)
+ */
+ if (likely(!(fault & VM_FAULT_ERROR)))
+		/* Normal return path: fault handled gracefully */
+ return;
+
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_OOM) {
+ pagefault_out_of_memory();
+ return;
+ }
+
+	if (fault & VM_FAULT_SIGBUS) {
+		sig = SIGBUS;
+		si_code = BUS_ADRERR;
+	} else {
+		sig = SIGSEGV;
+	}
+
+ tsk->thread.fault_address = address;
+ force_sig_fault(sig, si_code, (void __user *)address);
+ return;
+
+no_context:
+ if (fixup_exception(regs))
+ return;
+
+ die("Oops", regs, address);
+}
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
new file mode 100644
index 0000000000..c79912a6b1
--- /dev/null
+++ b/arch/arc/mm/highmem.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/memblock.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/pgtable.h>
+#include <asm/processor.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * HIGHMEM API:
+ *
+ * kmap() API provides sleep semantics, hence is referred to as "permanent maps"
+ * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
+ * for book-keeping
+ *
+ * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
+ * short-lived "temporary mappings", which historically were implemented as
+ * fixmaps (compile time addr etc). Their book-keeping is done per cpu.
+ *
+ * Both these facts combined (preemption disabled and per-cpu allocation)
+ * means the total number of concurrent fixmaps will be limited to max
+ * such allocations in a single control path. Thus KM_TYPE_NR (another
+ * historic relic) is a smallish number which caps the max percpu fixmaps
+ *
+ * ARC HIGHMEM Details
+ *
+ * - the kernel vaddr space from 0x7000_0000 to 0x8000_0000 (currently
+ *   used by vmalloc/module)
+ * is now shared between vmalloc and kmap (non overlapping though)
+ *
+ * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ * CPU. So the number of CPUs sharing a single PTE page is limited.
+ *
+ * - pkmap, being preemptible, could in theory support more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+ * the PGD and only works with a single page table @pkmap_page_table, hence
+ * sets the limit
+ */
+
+extern pte_t *pkmap_page_table;
+
+static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+{
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
+ pte_t *pte_k;
+
+ pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte_k)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
+ pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+ return pte_k;
+}
+
+void __init kmap_init(void)
+{
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
+
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+ alloc_kmap_pgtable(FIXMAP_BASE);
+}
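+
+/*
+ * Editorial aside - a hedged usage sketch, not part of this file: the
+ * short-lived mapping flavour described above is what callers use to
+ * touch a highmem page. zero_high_page() is an illustrative name;
+ * kmap_local_page()/kunmap_local() are the standard helpers.
+ */
+#if 0	/* illustrative only */
+static void zero_high_page(struct page *page)
+{
+	void *vaddr = kmap_local_page(page);	/* grabs a per-cpu fixmap slot */
+
+	memset(vaddr, 0, PAGE_SIZE);
+	kunmap_local(vaddr);	/* unmap in reverse order of mapping */
+}
+#endif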
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 0000000000..6a71b23f13
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/initrd.h>
+#endif
+#include <linux/of_fdt.h>
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
+static unsigned long low_mem_sz;
+
+#ifdef CONFIG_HIGHMEM
+static unsigned long min_high_pfn, max_high_pfn;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
+unsigned long arch_pfn_offset;
+EXPORT_SYMBOL(arch_pfn_offset);
+#endif
+
+long __init arc_get_mem_sz(void)
+{
+ return low_mem_sz;
+}
+
+/* Users can override the above with "mem=nnn[KkMm]" on the cmdline */
+static int __init setup_mem_sz(char *str)
+{
+ low_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+ /* early console might not be setup yet - it will show up later */
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
+
+ return 0;
+}
+early_param("mem", setup_mem_sz);
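+
+/*
+ * Editorial aside - hedged illustration of the parsing above (values
+ * hypothetical): memparse() honours the usual K/M/G suffixes, so
+ *   "mem=512M"    -> 0x20000000 (already page aligned)
+ *   "mem=0x800123" -> & PAGE_MASK drops the sub-page 0x123 bits
+ */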
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ int in_use = 0;
+
+ if (!low_mem_sz) {
+ if (base != low_mem_start)
+ panic("CONFIG_LINUX_RAM_BASE != DT memory { }");
+
+ low_mem_sz = size;
+ in_use = 1;
+ memblock_add_node(base, size, 0, MEMBLOCK_NONE);
+ } else {
+#ifdef CONFIG_HIGHMEM
+ high_mem_start = base;
+ high_mem_sz = size;
+ in_use = 1;
+ memblock_add_node(base, size, 1, MEMBLOCK_NONE);
+ memblock_reserve(base, size);
+#endif
+ }
+
+ pr_info("Memory @ %llx [%lldM] %s\n",
+ base, TO_MB(size), !in_use ? "Not used":"");
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+
+ setup_initial_init_mm(_text, _etext, _edata, _end);
+
+ /* first page of system - kernel .vector starts here */
+ min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
+
+ /* Last usable page of low mem */
+ max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
+
+ /*------------- bootmem allocator setup -----------------------*/
+
+ /*
+ * seed the bootmem allocator after any DT memory node parsing or
+ * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
+ *
+ * Only low mem is added, otherwise we have crashes when allocating
+ * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
+ * avail memory, ending in highmem with a > 32-bit address. However
+	 * it then tries to memset it with a truncated 32-bit handle, causing
+ * the crash
+ */
+
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+ initrd_start = (unsigned long)__va(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+ memblock_dump_all();
+
+ /*----------------- node/zones setup --------------------------*/
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+ * than addresses in normal aka low memory (0x8000_0000 based).
+ * Even with PAE, the huge peripheral space hole would waste a lot of
+	 * mem with a single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled the memory map corresponding
+ * to the hole is freed and ARC specific version of pfn_valid()
+ * handles the hole in the memory map.
+ */
+
+ min_high_pfn = PFN_DOWN(high_mem_start);
+ max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+
+ high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+
+ arch_pfn_offset = min(min_low_pfn, min_high_pfn);
+ kmap_init();
+
+#else /* CONFIG_HIGHMEM */
+ /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
+ max_mapnr = max_low_pfn - min_low_pfn;
+
+#endif /* CONFIG_HIGHMEM */
+
+ free_area_init(max_zone_pfn);
+}
+
+static void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ memblock_phys_free(high_mem_start, high_mem_sz);
+ for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ memblock_free_all();
+ highmem_init();
+
+ BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
+}
+
+#ifdef CONFIG_HIGHMEM
+int pfn_valid(unsigned long pfn)
+{
+ return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
+ (pfn >= min_low_pfn && pfn <= max_low_pfn);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 0000000000..b07004d532
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cache.h>
+
+static inline bool arc_uncached_addr_space(phys_addr_t paddr)
+{
+ if (is_isa_arcompact()) {
+ if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+ return true;
+ } else if (paddr >= perip_base && paddr <= perip_end) {
+ return true;
+ }
+
+ return false;
+}
+
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
+{
+	/*
+	 * If the region is h/w uncached, the MMU mapping can be elided as an
+	 * optimization. The cast to u32 is fine as this region can only be
+	 * inside 4GB.
+	 */
+ if (arc_uncached_addr_space(paddr))
+ return (void __iomem *)(u32)paddr;
+
+ return ioremap_prot(paddr, size,
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
+ * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses
+ * in the ARC hardware uncached region, this one still goes through the MMU
+ * as the caller might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
+ unsigned long flags)
+{
+ pgprot_t prot = __pgprot(flags);
+
+ /* force uncached */
+ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ /* weird double cast to handle phys_addr_t > 32 bits */
+ if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
+ return;
+
+ generic_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
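+
+/*
+ * Editorial aside - a hedged usage sketch, not part of this file: a driver
+ * mapping a peripheral register block. The base address and register offset
+ * are hypothetical; per the logic above, an address inside the hardware
+ * uncached window comes back as a plain 1:1 pointer with no MMU entry.
+ */
+#if 0	/* illustrative only */
+static u32 probe_periph_id(void)
+{
+	void __iomem *regs = ioremap(0xf0000000, 0x1000); /* hypothetical base */
+	u32 id = 0;
+
+	if (regs) {
+		id = readl(regs);	/* hypothetical ID register at offset 0 */
+		iounmap(regs);
+	}
+	return id;
+}
+#endif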
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 0000000000..fce5fa2b4f
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to avoid aliasing
+ * issues with VIPT caches: a specific page of an object must always be
+ * mapped at a multiple of SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
+ struct vm_unmapped_area_info info;
+
+ /*
+ * We only need to do colour alignment if D cache aliases.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ /*
+ * We enforce the MAP_FIXED case.
+ */
+ if (flags & MAP_FIXED) {
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
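+
+/*
+ * Editorial aside - a hedged worked instance of COLOUR_ALIGN(), assuming
+ * SHMLBA == 0x4000 and PAGE_SHIFT == 13 (8K pages) purely for illustration:
+ *   addr = 0x50001000, pgoff = 3
+ *   round-up term: (0x50001000 + 0x3fff) & ~0x3fff = 0x50004000
+ *   colour term:   (3 << 13) & 0x3fff              = 0x2000
+ *   result:        0x50006000
+ * so (addr - (pgoff << PAGE_SHIFT)) = 0x50000000 is SHMLBA-aligned -
+ * the same invariant the MAP_FIXED check above enforces.
+ */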
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
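+
+/*
+ * Editorial aside - hedged reading of the table above: private writable
+ * mappings (VM_WRITE without VM_SHARED) get PAGE_U_R, i.e. the hardware
+ * write permission is withheld so the first store faults and the generic
+ * mm can run CoW / maybe_mkwrite(); only MAP_SHARED writable mappings get
+ * PAGE_U_W_R up front.
+ */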
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 0000000000..e536b2dcd4
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/bug.h>
+#include <linux/mm_types.h>
+
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+
+static struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * Caller needs to setup Index Reg (manually or via getIndex)
+ */
+static inline void __tlb_entry_erase(void)
+{
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+static void utlb_invalidate(void)
+{
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+}
+
+#ifdef CONFIG_ARC_MMU_V3
+
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
+{
+ unsigned int idx;
+
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ unsigned int idx;
+
+ /* Locate the TLB entry for this vaddr + ASID */
+ idx = tlb_entry_lkup(vaddr_n_asid);
+
+ /* No error means entry found, zero it out */
+ if (likely(!(idx & TLB_LKUP_ERR))) {
+ __tlb_entry_erase();
+ } else {
+ /* Duplicate entry error */
+ WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+ vaddr_n_asid);
+ }
+}
+
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
+{
+ unsigned int idx;
+
+ /*
+ * First verify if entry for this vaddr+ASID already exists
+ * This also sets up PD0 (vaddr, ASID..) for final commit
+ */
+ idx = tlb_entry_lkup(pd0);
+
+ /*
+ * If Not already present get a free slot from MMU.
+ * Otherwise, Probe would have located the entry and set INDEX Reg
+ * with existing location. This will cause Write CMD to over-write
+ * existing entry with new PD0 and PD1
+ */
+ if (likely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+ /* setup the other half of TLB entry (pfn, rwx..) */
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+ /*
+ * Commit the Entry to MMU
+ * It doesn't sound safe to use the TLBWriteNI cmd here
+ * which doesn't flush uTLBs. I'd rather be safe than sorry.
+ */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+#else /* MMUv4 */
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
+}
+
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
+{
+ write_aux_reg(ARC_REG_TLBPD0, pd0);
+
+ if (!is_pae40_enabled()) {
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
+ } else {
+ write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
+ write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+ }
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
+}
+
+#endif
+
+/*
+ * Un-conditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned long flags;
+ unsigned int entry;
+ int num_tlb = mmu->sets * mmu->ways;
+
+ local_irq_save(flags);
+
+ /* Load PD0 and PD1 with template for a Blank Entry */
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+
+ for (entry = 0; entry < num_tlb; entry++) {
+ /* write this entry to the TLB */
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
+ }
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ const int stlb_idx = 0x800;
+
+ /* Blank sTLB entry */
+ write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
+
+ for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
+ }
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /*
+	 * Small optimisation courtesy of IA64:
+	 * flush_tlb_mm() is called during fork, exit, munmap etc., possibly
+	 * multiple times. Only for fork() do we need to move the parent to a
+	 * new MMU ctxt; all other cases are NOPs, hence this check.
+ */
+ if (atomic_read(&mm->mm_users) == 0)
+ return;
+
+ /*
+ * - Move to a new ASID, but only if the mm is still wired in
+ * (Android Binder ended up calling this for vma->mm != tsk->mm,
+ * causing h/w - s/w ASID to get out of sync)
+ * - Also get_new_mmu_context() new implementation allocates a new
+	 *   ASID only if it is not allocated already - so deallocate it first
+ */
+ destroy_context(mm);
+ if (current->mm == mm)
+ get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a Range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive
+ * Difference between this and Kernel Range Flush is
+ * -Here the fastest way (if range is too large) is to move to next ASID
+ * without doing any explicit Shootdown
+ * -In case of kernel Flush, entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+	/* If range @start to @end is more than 32 TLB entries deep,
+	 * it's better to move to a new ASID rather than searching for
+	 * individual entries and then shooting them down
+	 *
+	 * The calculation above is rough and doesn't account for unaligned
+	 * parts, but that's fine since this is heuristic anyway
+ */
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
+ /*
+ * @start moved to page start: this alone suffices for checking
+ * loop end condition below, w/o need for aligning @end to end
+ * e.g. 2000 to 4001 will anyhow loop twice
+ */
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+ while (start < end) {
+ tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
+ start += PAGE_SIZE;
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ * @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start,@end alone (interpreted as user vaddr), although technically SASID
+ * is also needed. However our smart TLBProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+	/* exactly the same as above, except the TLB entry doesn't take an ASID */
+
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ while (start < end) {
+ tlb_entry_erase(start);
+ start += PAGE_SIZE;
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Delete the TLB entry in MMU for a given page (user virtual address)
+ * NOTE: One TLB entry contains the translation for a single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ /* Note that it is critical that interrupts are DISABLED between
+	 * checking the ASID and using it to flush the TLB entry
+ */
+ local_irq_save(flags);
+
+ if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
+ }
+
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+ struct vm_area_struct *ta_vma;
+ unsigned long ta_start;
+ unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ipi_flush_pmd_tlb_range(void *arg)
+{
+ struct tlb_args *ta = arg;
+
+ local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+#endif
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+ mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = uaddr
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_vma = vma,
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
+}
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct tlb_args ta = {
+ .ta_start = start,
+ .ta_end = end
+ };
+
+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
+/*
+ * Routine to create a TLB entry
+ */
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+{
+ unsigned long flags;
+ unsigned int asid_or_sasid, rwx;
+ unsigned long pd0;
+ phys_addr_t pd1;
+
+ /*
+ * create_tlb() assumes that current->mm == vma->mm, since
+	 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
+ * -completes the lazy write to SASID reg (again valid for curr tsk)
+ *
+ * Removing the assumption involves
+ * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
+ * -More importantly it makes this handler inconsistent with fast-path
+ * TLB Refill handler which always deals with "current"
+ *
+	 * Let's see the use cases when current->mm != vma->mm and we land here
+ * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+ * Here VM wants to pre-install a TLB entry for user stack while
+ * current->mm still points to pre-execve mm (hence the condition).
+ * However the stack vaddr is soon relocated (randomization) and
+ * move_page_tables() tries to undo that TLB entry.
+ * Thus not creating TLB entry is not any worse.
+ *
+ * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+ * breakpoint in debugged task. Not creating a TLB now is not
+ * performance critical.
+ *
+	 * Neither of the cases above is worth the code churn.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ vaddr &= PAGE_MASK;
+
+ /* update this PTE credentials */
+ pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+ /* Create HW TLB(PD0,PD1) from PTE */
+
+ /* ASID for this task */
+ asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+
+ /*
+ * ARC MMU provides fully orthogonal access bits for K/U mode,
+ * however Linux only saves 1 set to save PTE real-estate
+ * Here we convert 3 PTE bits into 6 MMU bits:
+ * -Kernel only entries have Kr Kw Kx 0 0 0
+ * -User entries have mirrored K and U bits
+ */
+ rwx = pte_val(*ptep) & PTE_BITS_RWX;
+
+ if (pte_val(*ptep) & _PAGE_GLOBAL)
+ rwx <<= 3; /* r w x => Kr Kw Kx 0 0 0 */
+ else
+ rwx |= (rwx << 3); /* r w x => Kr Kw Kx Ur Uw Ux */
+
+ pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
+
+ tlb_entry_insert(pd0, pd1);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Called at the end of pagefault, for a userspace mapped page
+ * -pre-install the corresponding TLB entry into MMU
+ * -Finalize the delayed D-cache flush of kernel mapping of page due to
+ * flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
+ */
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
+{
+ unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
+ struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+ create_tlb(vma, vaddr, ptep);
+
+ if (page == ZERO_PAGE(0)) {
+ return;
+ }
+
+ /*
+ * Exec page : Independent of aliasing/page-color considerations,
+ * since icache doesn't snoop dcache on ARC, any dirty
+ * K-mapping of a code page needs to be wback+inv so that
+ * icache fetch by userspace sees code correctly.
+ * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+ * so userspace sees the right data.
+ * (Avoids the flush for Non-exec + congruent mapping case)
+ */
+ if ((vma->vm_flags & VM_EXEC) ||
+ addr_not_cache_congruent(paddr, vaddr)) {
+ struct folio *folio = page_folio(page);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+ if (dirty) {
+ unsigned long offset = offset_in_folio(folio, paddr);
+ nr = folio_nr_pages(folio);
+ paddr -= offset;
+ vaddr -= offset;
+ /* wback + inv dcache lines (K-mapping) */
+ __flush_dcache_pages(paddr, paddr, nr);
+
+ /* invalidate any existing icache lines (U-mapping) */
+ if (vma->vm_flags & VM_EXEC)
+ __inv_icache_pages(paddr, vaddr, nr);
+ }
+ }
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+/*
+ * MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
+ * support.
+ *
+ * Normal and Super pages can co-exist (of course not overlap) in TLB with a
+ * new bit "SZ" in TLB page descriptor to distinguish between them.
+ * Super Page size is configurable in hardware (4K to 16M), but fixed once
+ * the RTL is built.
+ *
+ * The exact THP size a Linux configuration will support is a function of:
+ * - MMU page size (typical 8K, RTL fixed)
+ * - software page walker address split between PGD:PTE:PFN (typical
+ * 11:8:13, but can be changed with 1 line)
+ * So for above default, THP size supported is 8K * (2^8) = 2M
+ *
+ * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
+ * reduces to 1 level (as PTE is folded into PGD and canonically referred
+ * to as PMD).
+ * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
+ */
+
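+/*
+ * Editorial aside - hedged arithmetic check of the sizing note above:
+ *   PAGE_SIZE = 8K, PTE index bits = 8 -> THP size = 8K << 8 = 2M
+ *   (a hypothetical 16K base page with the same split would give 4M)
+ */
+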
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+{
+ pte_t pte = __pte(pmd_val(*pmd));
+ update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
+}
+
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned int cpu;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
+ unsigned int asid = hw_pid(vma->vm_mm, cpu);
+
+ /* No need to loop here: this will always be for 1 Huge Page */
+ tlb_entry_erase(start | _PAGE_HW_SZ | asid);
+ }
+
+ local_irq_restore(flags);
+}
+
+#endif
+
+/* Read the MMU Build Configuration Register, decode it and save into
+ * the cpuinfo structure for later use.
+ * No validation is done here, simply read/convert the BCR
+ */
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned int bcr, u_dtlb, u_itlb, sasid;
+ struct bcr_mmu_3 *mmu3;
+ struct bcr_mmu_4 *mmu4;
+ char super_pg[64] = "";
+ int n = 0;
+
+ bcr = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (bcr >> 24);
+
+ if (is_isa_arcompact() && mmu->ver == 3) {
+ mmu3 = (struct bcr_mmu_3 *)&bcr;
+ mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ u_dtlb = mmu3->u_dtlb;
+ u_itlb = mmu3->u_itlb;
+ sasid = mmu3->sasid;
+ } else {
+ mmu4 = (struct bcr_mmu_4 *)&bcr;
+ mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
+ mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
+ mmu->sets = 64 << mmu4->n_entry;
+ mmu->ways = mmu4->n_ways * 2;
+ u_dtlb = mmu4->u_dtlb * 4;
+ u_itlb = mmu4->u_itlb * 4;
+ sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
+ }
+
+ if (mmu->s_pg_sz_m)
+ scnprintf(super_pg, 64, "/%dM%s",
+ mmu->s_pg_sz_m,
+ IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
+
+ n += scnprintf(buf + n, len - n,
+ "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+ mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+ mmu->sets, mmu->ways,
+ u_dtlb, u_itlb,
+ IS_AVAIL1(sasid, ", SASID"),
+ IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+
+ return n;
+}
+
+int pae40_exist_but_not_enab(void)
+{
+ return mmuinfo.pae && !is_pae40_enabled();
+}
+
+void arc_mmu_init(void)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ int compat = 0;
+
+ /*
+ * Can't be done in processor.h due to header include dependencies
+ */
+ BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
+
+ /*
+ * stack top size sanity check,
+ * Can't be done in processor.h due to header include dependencies
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
+
+ /*
+ * Ensure that MMU features assumed by kernel exist in hardware.
+ * - For older ARC700 cpus, only v3 supported
+ * - For HS cpus, v4 was baseline and v5 is backwards compatible
+ * (will run older software).
+ */
+ if (is_isa_arcompact() && mmu->ver == 3)
+ compat = 1;
+ else if (is_isa_arcv2() && mmu->ver >= 4)
+ compat = 1;
+
+ if (!compat)
+ panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);
+
+ if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
+ panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
+ panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
+ (unsigned long)TO_MB(HPAGE_PMD_SIZE));
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+ panic("Hardware doesn't support PAE40\n");
+
+ /* Enable the MMU with ASID 0 */
+ mmu_setup_asid(NULL, 0);
+
+ /* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
+ mmu_setup_pgd(NULL, swapper_pg_dir);
+
+ if (pae40_exist_but_not_enab())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ * --------------------- -----------
+ * |way0|way1|way2|way3| |way0|way1|
+ * --------------------- -----------
+ * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
+ * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
+ * ~ ~ ~ ~
+ * [set127] | 508| 509| 510| 511| | 254| 255|
+ * --------------------- -----------
+ * For normal operations we don't (and must not) care how the above works,
+ * since the MMU cmd getIndex(vaddr) abstracts that out.
+ * However for walking WAYS of a SET, we need to know this
+ */
+#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
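+
+/*
+ * Editorial aside - hedged worked instance: for the 128-set, 4-way column
+ * above, SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6, matching the table
+ * (set1/way2 holds linear index 6).
+ */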
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -The MMU complains not at the time of duplicate PD installation, but at
+ *  the time a lookup matches multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ * the duplicate one.
+ * -Knob to be verbose about it. (TODO: hook it up to debugfs)
+ */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned long flags;
+ int set, n_ways = mmu->ways;
+
+ n_ways = min(n_ways, 4);
+ BUG_ON(mmu->ways > 4);
+
+ local_irq_save(flags);
+
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+ int is_valid, way;
+ unsigned int pd0[4];
+
+ /* read out all the ways of current set */
+ for (way = 0, is_valid = 0; way < n_ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+ pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+ is_valid |= pd0[way] & _PAGE_PRESENT;
+ pd0[way] &= PAGE_MASK;
+ }
+
+ /* If all the WAYS in SET are empty, skip to next SET */
+ if (!is_valid)
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+ for (way = 0; way < n_ways - 1; way++) {
+
+ int n;
+
+ if (!pd0[way])
+ continue;
+
+ for (n = way + 1; n < n_ways; n++) {
+ if (pd0[way] != pd0[n])
+ continue;
+
+ if (!dup_pd_silent)
+ pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+ pd0[way], set, way, n);
+
+ /*
+ * clear entry @way and not @n.
+ * This is critical to our optimised loop
+ */
+ pd0[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ __tlb_entry_erase();
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 0000000000..e054780a8f
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: April 2011 :
+ * -MMU v1: moved out legacy code into a separate file
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *  entry, so that it doesn't knock out its I-TLB entry
+ * -Some more fine tuning:
+ * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ * -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ * Hence Leaner by 1.5 K
+ * Used Conditional arithmetic to replace excessive branching
+ * Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ * more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ * -Added Debug Code to check if sw-ASID == hw-ASID
+ *
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <asm/entry.h>
+#include <asm/mmu.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; need to save only a handful of regs, as opposed to the complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+;--------------------------------------------------------------------------
+
+; scratch memory to save [r0-r3] used to code TLB refill Handler
+ARCFP_DATA ex_saved_reg1
+ .align 1 << L1_CACHE_SHIFT
+ .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+ .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+ .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+ .size ex_saved_reg1, 16
+ex_saved_reg1:
+ .zero 16
+#endif
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+ asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+#else
+ st r0, [@ex_saved_reg1]
+ mov_s r0, @ex_saved_reg1
+#endif
+ st_s r1, [r0, 4]
+ st_s r2, [r0, 8]
+ st_s r3, [r0, 12]
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+ asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ lr r0, [ARC_REG_SCRATCH_DATA0]
+#else
+ mov_s r0, @ex_saved_reg1
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ ld_s r0, [r0]
+#endif
+.endm
+
+#else /* ARCv2 */
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ std r0, [sp, -16]
+ std r2, [sp, -8]
+#else
+ PUSH r0
+ PUSH r1
+ PUSH r2
+ PUSH r3
+#endif
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_ARC_HAS_LL64
+ ldd r0, [sp, -16]
+ ldd r2, [sp, -8]
+#else
+ POP r3
+ POP r2
+ POP r1
+ POP r0
+#endif
+.endm
+
+#endif
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
+#ifndef PMD_SHIFT
+#define PMD_SHIFT PUD_SHIFT
+#endif
+
+#ifndef PUD_SHIFT
+#define PUD_SHIFT PGDIR_SHIFT
+#endif
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+ lr r2, [efa]
+
+#ifdef CONFIG_ISA_ARCV2
+ lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+ GET_CURR_TASK_ON_CPU r1
+ ld r1, [r1, TASK_ACT_MM]
+ ld r1, [r1, MM_PGD]
+#endif
+
+ lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
+ ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
+ tst r3, r3
+ bz do_slow_path_pf ; if no Page Table, do page fault
+
+#if CONFIG_PGTABLE_LEVELS > 3
+ lsr r0, r2, PUD_SHIFT ; Bits for indexing into PUD
+ and r0, r0, (PTRS_PER_PUD - 1)
+ ld.as r1, [r3, r0] ; PMD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+ lsr r0, r2, PMD_SHIFT ; Bits for indexing into PMD
+ and r0, r0, (PTRS_PER_PMD - 1)
+ ld.as r1, [r3, r0] ; PMD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
+ add2.nz r1, r1, r0
+ bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
+ mov.nz r0, r3
+
+#endif
+ and r1, r3, PAGE_MASK
+
+ ; Get the PTE entry: The idea is
+ ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
+ ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+ ; (3) z = (pgtbl + y * 4)
+
+#ifdef CONFIG_ARC_HAS_PAE40
+#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
+#else
+#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
+#endif
+
+ ; multiply in step (3) above avoided by shifting lesser in step (1)
+ lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
+ and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
+ ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
+ ; r1: PTE ptr
+
+2:
+
+.endm
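+
+; Editorial aside - a hedged C transcription of steps (1)-(3) above, for
+; sanity checking only (pgtbl/faddr names are illustrative):
+;
+;	idx = (faddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);  /* steps 1+2 */
+;	pte = *(pte_t *)(pgtbl + (idx << PTE_SIZE_LOG));   /* step 3 */
+;
+; the lsr/and pair above fuses the index shift with the entry-size multiply,
+; so no explicit multiply is needed.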
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; (for PAE40: a two-word PTE, and a three-word TLB Entry [PD0:PD1:PD1HI])
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+ and r3, r0, PTE_BITS_RWX ; r w x
+ asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
+ and.f 0, r0, _PAGE_GLOBAL
+ or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
+
+ and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
+ or r3, r3, r2
+
+ sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
+#ifdef CONFIG_ARC_HAS_PAE40
+ ld r3, [r1, 4] ; paddr[39..32]
+ sr r3, [ARC_REG_TLBPD1HI]
+#endif
+
+ and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+
+ lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
+
+ or r3, r3, r2 ; S | vaddr | {sasid|asid}
+ sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+#ifdef CONFIG_ARC_MMU_V3
+
+ /* Get free TLB slot: Set = computed from vaddr, way = random */
+ sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+ /* Commit the Write */
+ sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+
+#else
+ sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
+#endif
+
+88:
+.endm
+
+
+ARCFP_CODE ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ENTRY(EV_TLBMissI)
+
+ TLBMISS_FREEUP_REGS
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+	; VERIFY_PTE: Check if PTE permissions are appropriate for executing code
+ cmp_s r2, VMALLOC_START
+ mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+ or.hs r2, r2, _PAGE_GLOBAL
+
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
+ bnz do_slow_path_pf
+
+ ; Let Linux VM know that the page was accessed
+ or r0, r0, _PAGE_ACCESSED ; set Accessed Bit
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+EV_TLBMissI_fast_ret: ; additional label for VDK OS-kit instrumentation
+ rtie
+
+END(EV_TLBMissI)
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ENTRY(EV_TLBMissD)
+
+ TLBMISS_FREEUP_REGS
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+	; VERIFY_PTE: Check if PTE permissions are appropriate for data access (R/W/R+W)
+
+ cmp_s r2, VMALLOC_START
+ mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE
+ or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only
+
+ ; Linux PTE [RWX] bits are semantically overloaded:
+ ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+ ; -Otherwise they are user-mode permissions, and those are exactly
+ ; same for kernel mode as well (e.g. copy_(to|from)_user)
+
+ lr r3, [ecr]
+ btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
+ or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
+ or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ ; Above laddering takes care of XCHG access (both R and W)
+
+ ; By now, r2 setup with all the Flags we need to check in PTE
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+ ;----------------------------------------------------------------
+ ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+ or r0, r0, _PAGE_ACCESSED ; Accessed bit always
+ or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation
+ rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Set Z flag if exception in U mode. Hardware micro-ops do this on any
+	; taken interrupt/exception, and thus it is already the case at the entry
+	; above, but the ensuing code will have clobbered it.
+	; EXCEPTION_PROLOGUE, called in the slow path, relies on the correct Z flag
+
+ lr r2, [erstatus]
+ and r2, r2, STATUS_U_MASK
+ bxor.f 0, r2, STATUS_U_BIT
+#endif
+
+ ; Restore the 4-scratch regs saved by fast path miss handler
+ TLBMISS_RESTORE_REGS
+
+ ; Slow path TLB Miss handled as a regular ARC Exception
+ ; (stack switching / save the complete reg-file).
+ b call_do_page_fault
+END(EV_TLBMissD)
diff --git a/arch/arc/plat-axs10x/Kconfig b/arch/arc/plat-axs10x/Kconfig
new file mode 100644
index 0000000000..b9652c69d1
--- /dev/null
+++ b/arch/arc/plat-axs10x/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+#
+
+menuconfig ARC_PLAT_AXS10X
+ bool "Synopsys ARC AXS10x Software Development Platforms"
+ select DW_APB_ICTL
+ select GPIO_DWAPB
+ select OF_GPIO
+ select HAVE_PCI
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB
+ select AXS101 if ISA_ARCOMPACT
+ select AXS103 if ISA_ARCV2
+ help
+ Support for the ARC AXS10x Software Development Platforms.
+
+ The AXS10x Platforms consist of a mainboard with peripherals,
+ on which several daughter cards can be placed. The daughter cards
+ typically contain a CPU and memory.
+
+if ARC_PLAT_AXS10X
+
+config AXS101
+ depends on ISA_ARCOMPACT
+ bool "AXS101 with AXC001 CPU Card (ARC 770D/EM6/AS221)"
+ help
+ This adds support for the 770D/EM6/AS221 CPU Card. Only the ARC
+ 770D is supported in Linux.
+
+ The AXS101 Platform consists of an AXS10x mainboard with
+ this daughtercard. Please use the axs101.dts device tree
+ with this configuration.
+
+config AXS103
+ bool "AXS103 with AXC003 CPU Card (ARC HS38x)"
+ depends on ISA_ARCV2
+ help
+ This adds support for the HS38x CPU Card.
+
+ The AXS103 Platform consists of an AXS10x mainboard with
+ this daughtercard. Please use the axs103.dts device tree
+ with this configuration.
+
+endif
diff --git a/arch/arc/plat-axs10x/Makefile b/arch/arc/plat-axs10x/Makefile
new file mode 100644
index 0000000000..cebe5716ee
--- /dev/null
+++ b/arch/arc/plat-axs10x/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x.o
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
new file mode 100644
index 0000000000..1feb990a56
--- /dev/null
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AXS101/AXS103 Software Development Platform
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/io.h>
+#include <asm/mach_desc.h>
+#include <soc/arc/mcip.h>
+
+#define AXS_MB_CGU 0xE0010000
+#define AXS_MB_CREG 0xE0011000
+
+#define CREG_MB_IRQ_MUX (AXS_MB_CREG + 0x214)
+#define CREG_MB_SW_RESET (AXS_MB_CREG + 0x220)
+#define CREG_MB_VER (AXS_MB_CREG + 0x230)
+#define CREG_MB_CONFIG (AXS_MB_CREG + 0x234)
+
+#define AXC001_CREG 0xF0001000
+#define AXC001_GPIO_INTC 0xF0003000
+
+static void __init axs10x_enable_gpio_intc_wire(void)
+{
+ /*
+ * Peripherals on CPU Card and Mother Board are wired to cpu intc via
+ * intermediate DW APB GPIO blocks (mainly for debouncing)
+ *
+ * ---------------------
+ * | snps,arc700-intc |
+ * ---------------------
+ * | #7 | #15
+ * ------------------- -------------------
+ * | snps,dw-apb-gpio | | snps,dw-apb-gpio |
+ * ------------------- -------------------
+ * | #12 |
+ * | [ Debug UART on cpu card ]
+ * |
+ * ------------------------
+ * | snps,dw-apb-intc (MB)|
+ * ------------------------
+ * | | | |
+ * [eth] [uart] [... other perip on Main Board]
+ *
+	 * The current implementation of the "irq-dw-apb-ictl" driver doesn't work
+	 * well with stacked INTCs. In particular, a problem occurs if its master
+	 * INTC is not yet instantiated. See discussion here -
+	 * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
+	 *
+	 * So set up the first gpio block as a passive pass-through and hide it
+	 * from the DT hardware topology - connect the MB intc directly to the
+	 * cpu intc. The GPIO "wire" nevertheless needs to be initialized here.
+	 *
+	 * One side advantage is that peripheral interrupt handling avoids one
+	 * nested intc ISR hop
+ */
+#define GPIO_INTEN (AXC001_GPIO_INTC + 0x30)
+#define GPIO_INTMASK (AXC001_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL (AXC001_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY (AXC001_GPIO_INTC + 0x3c)
+#define MB_TO_GPIO_IRQ 12
+
+ iowrite32(~(1 << MB_TO_GPIO_IRQ), (void __iomem *) GPIO_INTMASK);
+ iowrite32(0, (void __iomem *) GPIO_INTTYPE_LEVEL);
+ iowrite32(~0, (void __iomem *) GPIO_INT_POLARITY);
+ iowrite32(1 << MB_TO_GPIO_IRQ, (void __iomem *) GPIO_INTEN);
+}
+
+static void __init axs10x_print_board_ver(unsigned int creg, const char *str)
+{
+ union ver {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:11, y:12, m:4, d:5;
+#else
+ unsigned int d:5, m:4, y:12, pad:11;
+#endif
+ };
+ unsigned int val;
+ } board;
+
+ board.val = ioread32((void __iomem *)creg);
+ pr_info("AXS: %s FPGA Date: %u-%u-%u\n", str, board.d, board.m,
+ board.y);
+}
+
+static void __init axs10x_early_init(void)
+{
+ int mb_rev;
+ char mb[32];
+
+ /* Determine motherboard version */
+ if (ioread32((void __iomem *) CREG_MB_CONFIG) & (1 << 28))
+ mb_rev = 3; /* HT-3 (rev3.0) */
+ else
+ mb_rev = 2; /* HT-2 (rev2.0) */
+
+ axs10x_enable_gpio_intc_wire();
+
+ scnprintf(mb, 32, "MainBoard v%d", mb_rev);
+ axs10x_print_board_ver(CREG_MB_VER, mb);
+}
+
+#ifdef CONFIG_AXS101
+
+#define CREG_CPU_ADDR_770 (AXC001_CREG + 0x20)
+#define CREG_CPU_ADDR_TUNN (AXC001_CREG + 0x60)
+#define CREG_CPU_ADDR_770_UPD (AXC001_CREG + 0x34)
+#define CREG_CPU_ADDR_TUNN_UPD (AXC001_CREG + 0x74)
+
+#define CREG_CPU_ARC770_IRQ_MUX (AXC001_CREG + 0x114)
+#define CREG_CPU_GPIO_UART_MUX (AXC001_CREG + 0x120)
+
+/*
+ * Set up System Memory Map for ARC cpu / peripherals controllers
+ *
+ * Each AXI master has a 4GB memory map specified as 16 apertures of 256MB, each
+ * of which maps to a corresponding 256MB aperture in Target slave memory map.
+ *
+ * e.g. ARC cpu AXI Master's aperture 8 (0x8000_0000) is mapped to aperture 0
+ * (0x0000_0000) of DDR Port 0 (slave #1)
+ *
+ * Access from cpu to MB controllers such as GMAC is setup using AXI Tunnel:
+ * which has master/slaves on both ends.
+ * e.g. aperture 14 (0xE000_0000) of ARC cpu is mapped to aperture 14
+ * (0xE000_0000) of CPU Card AXI Tunnel slave (slave #3) which is mapped to
+ * MB AXI Tunnel Master, which also has a mem map setup
+ *
+ * In the reverse direction, MB AXI Masters (e.g. GMAC) mem map is setup
+ * to map to MB AXI Tunnel slave which connects to CPU Card AXI Tunnel Master
+ */
+struct aperture {
+ unsigned int slave_sel:4, slave_off:4, pad:24;
+};
+
+/* CPU Card target slaves */
+#define AXC001_SLV_NONE 0
+#define AXC001_SLV_DDR_PORT0 1
+#define AXC001_SLV_SRAM 2
+#define AXC001_SLV_AXI_TUNNEL 3
+#define AXC001_SLV_AXI2APB 6
+#define AXC001_SLV_DDR_PORT1 7
+
+/* MB AXI Target slaves */
+#define AXS_MB_SLV_NONE 0
+#define AXS_MB_SLV_AXI_TUNNEL_CPU 1
+#define AXS_MB_SLV_AXI_TUNNEL_HAPS 2
+#define AXS_MB_SLV_SRAM 3
+#define AXS_MB_SLV_CONTROL 4
+
+/* MB AXI masters */
+#define AXS_MB_MST_TUNNEL_CPU 0
+#define AXS_MB_MST_USB_OHCI 10
+
+/*
+ * memmap for ARC core on CPU Card
+ */
+static const struct aperture axc001_memmap[16] = {
+ {AXC001_SLV_AXI_TUNNEL, 0x0},
+ {AXC001_SLV_AXI_TUNNEL, 0x1},
+ {AXC001_SLV_SRAM, 0x0}, /* 0x2000_0000: Local SRAM */
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_DDR_PORT0, 0x0}, /* 0x8000_0000: DDR 0..256M */
+ {AXC001_SLV_DDR_PORT0, 0x1}, /* 0x9000_0000: DDR 256..512M */
+ {AXC001_SLV_DDR_PORT0, 0x2},
+ {AXC001_SLV_DDR_PORT0, 0x3},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_AXI_TUNNEL, 0xD},
+ {AXC001_SLV_AXI_TUNNEL, 0xE}, /* MB: CREG, CGU... */
+ {AXC001_SLV_AXI2APB, 0x0}, /* CPU Card local CREG, CGU... */
+};
+
+/*
+ * memmap for CPU Card AXI Tunnel Master (for access by MB controllers)
+ * GMAC (MB) -> MB AXI Tunnel slave -> CPU Card AXI Tunnel Master -> DDR
+ */
+static const struct aperture axc001_axi_tunnel_memmap[16] = {
+ {AXC001_SLV_AXI_TUNNEL, 0x0},
+ {AXC001_SLV_AXI_TUNNEL, 0x1},
+ {AXC001_SLV_SRAM, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_DDR_PORT1, 0x0},
+ {AXC001_SLV_DDR_PORT1, 0x1},
+ {AXC001_SLV_DDR_PORT1, 0x2},
+ {AXC001_SLV_DDR_PORT1, 0x3},
+ {AXC001_SLV_NONE, 0x0},
+ {AXC001_SLV_AXI_TUNNEL, 0xD},
+ {AXC001_SLV_AXI_TUNNEL, 0xE},
+ {AXC001_SLV_AXI2APB, 0x0},
+};
+
+/*
+ * memmap for MB AXI Masters
+ * The same mem map is used for all peripheral controllers as well as the
+ * MB AXI Tunnel Master.
+ */
+static const struct aperture axs_mb_memmap[16] = {
+ {AXS_MB_SLV_SRAM, 0x0},
+ {AXS_MB_SLV_SRAM, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_AXI_TUNNEL_CPU, 0x8}, /* DDR on CPU Card */
+ {AXS_MB_SLV_AXI_TUNNEL_CPU, 0x9}, /* DDR on CPU Card */
+ {AXS_MB_SLV_AXI_TUNNEL_CPU, 0xA},
+ {AXS_MB_SLV_AXI_TUNNEL_CPU, 0xB},
+ {AXS_MB_SLV_NONE, 0x0},
+ {AXS_MB_SLV_AXI_TUNNEL_HAPS, 0xD},
+ {AXS_MB_SLV_CONTROL, 0x0}, /* MB Local CREG, CGU... */
+ {AXS_MB_SLV_AXI_TUNNEL_CPU, 0xF},
+};
+
+static noinline void __init
+axs101_set_memmap(void __iomem *base, const struct aperture map[16])
+{
+ unsigned int slave_select, slave_offset;
+ int i;
+
+ slave_select = slave_offset = 0;
+ for (i = 0; i < 8; i++) {
+ slave_select |= map[i].slave_sel << (i << 2);
+ slave_offset |= map[i].slave_off << (i << 2);
+ }
+
+ iowrite32(slave_select, base + 0x0); /* SLV0 */
+ iowrite32(slave_offset, base + 0x8); /* OFFSET0 */
+
+ slave_select = slave_offset = 0;
+ for (i = 0; i < 8; i++) {
+ slave_select |= map[i+8].slave_sel << (i << 2);
+ slave_offset |= map[i+8].slave_off << (i << 2);
+ }
+
+ iowrite32(slave_select, base + 0x4); /* SLV1 */
+ iowrite32(slave_offset, base + 0xC); /* OFFSET1 */
+}
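+
+/*
+ * Worked example: applied to axc001_memmap above, the nibble packing
+ * (aperture i lands in bits [4i+3:4i]) yields
+ *	SLV0 = 0x00000233	OFFSET0 = 0x00000010
+ *	SLV1 = 0x63301111	OFFSET1 = 0x0ED03210
+ * Values computed by hand from the table, shown for illustration only.
+ */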
+
+static void __init axs101_early_init(void)
+{
+ int i;
+
+ /* ARC 770D memory view */
+ axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_770, axc001_memmap);
+ iowrite32(1, (void __iomem *) CREG_CPU_ADDR_770_UPD);
+
+	/* AXI tunnel memory map (incoming traffic from MB into CPU Card) */
+ axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_TUNN,
+ axc001_axi_tunnel_memmap);
+ iowrite32(1, (void __iomem *) CREG_CPU_ADDR_TUNN_UPD);
+
+ /* MB peripherals memory map */
+ for (i = AXS_MB_MST_TUNNEL_CPU; i <= AXS_MB_MST_USB_OHCI; i++)
+ axs101_set_memmap((void __iomem *) AXS_MB_CREG + (i << 4),
+ axs_mb_memmap);
+
+ iowrite32(0x3ff, (void __iomem *) AXS_MB_CREG + 0x100); /* Update */
+
+ /* GPIO pins 18 and 19 are used as UART rx and tx, respectively. */
+ iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
+
+	/* Set up the MB interrupt system: mux interrupts to GPIO7 */
+ iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
+
+ /* reset ethernet and ULPI interfaces */
+ iowrite32(0x18, (void __iomem *) CREG_MB_SW_RESET);
+
+ /* map GPIO 14:10 to ARC 9:5 (IRQ mux change for MB v2 onwards) */
+ iowrite32(0x52, (void __iomem *) CREG_CPU_ARC770_IRQ_MUX);
+
+ axs10x_early_init();
+}
+
+#endif /* CONFIG_AXS101 */
+
+#ifdef CONFIG_AXS103
+
+#define AXC003_CREG 0xF0001000
+#define AXC003_MST_AXI_TUNNEL 0
+#define AXC003_MST_HS38 1
+
+#define CREG_CPU_AXI_M0_IRQ_MUX (AXC003_CREG + 0x440)
+#define CREG_CPU_GPIO_UART_MUX (AXC003_CREG + 0x480)
+#define CREG_CPU_TUN_IO_CTRL (AXC003_CREG + 0x494)
+
+static void __init axs103_early_init(void)
+{
+#ifdef CONFIG_ARC_MCIP
+ /*
+	 * AXS103 SMP and QUAD configurations share a device tree which
+	 * defaults to 100 MHz. However, recent failures of the QUAD config
+	 * revealed P&R timing violations, so clamp it down to a safe 50 MHz.
+	 * Instead of duplicating the defconfig/DT for SMP/QUAD, add a small
+	 * hack of fudging the frequency in the DT.
+ */
+#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
+
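+	/* the cluster core count lives in MCIP Build Config Reg bits [21:16] */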
+ unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+ if (num_cores > 2) {
+ u32 freq;
+ int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
+ const struct fdt_property *prop;
+
+ prop = fdt_get_property(initial_boot_params, off,
+ "assigned-clock-rates", NULL);
+ freq = be32_to_cpu(*(u32 *)(prop->data));
+
+ /* Patching .dtb in-place with new core clock value */
+ if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
+ freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
+ fdt_setprop_inplace(initial_boot_params, off,
+ "assigned-clock-rates", &freq, sizeof(freq));
+ }
+ }
+#endif
+
+	/* Memory maps are already configured in the pre-bootloader */
+
+ /* set GPIO mux to UART */
+ iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
+
+ iowrite32((0x00100000U | 0x000C0000U | 0x00003322U),
+ (void __iomem *) CREG_CPU_TUN_IO_CTRL);
+
+	/* Set up the AXS_MB interrupt system. */
+ iowrite32(12, (void __iomem *) (CREG_CPU_AXI_M0_IRQ_MUX
+ + (AXC003_MST_HS38 << 2)));
+
+	/* connect the Main Board ICTL via the GPIO line */
+ iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
+
+ axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
+
+ axs10x_early_init();
+}
+#endif
+
+#ifdef CONFIG_AXS101
+
+static const char *axs101_compat[] __initconst = {
+ "snps,axs101",
+ NULL,
+};
+
+MACHINE_START(AXS101, "axs101")
+ .dt_compat = axs101_compat,
+ .init_early = axs101_early_init,
+MACHINE_END
+
+#endif /* CONFIG_AXS101 */
+
+#ifdef CONFIG_AXS103
+
+static const char *axs103_compat[] __initconst = {
+ "snps,axs103",
+ NULL,
+};
+
+MACHINE_START(AXS103, "axs103")
+ .dt_compat = axs103_compat,
+ .init_early = axs103_early_init,
+MACHINE_END
+
+/*
+ * For the VDK OS-kit, to get the offsets of the pid and comm fields
+ */
+char coware_swa_pid_offset[TASK_PID];
+char coware_swa_comm_offset[TASK_COMM];
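+
+/*
+ * Presumably the OS-kit reads the *size* of these symbols from the ELF
+ * image: TASK_PID and TASK_COMM are asm-offsets of the pid/comm fields in
+ * struct task_struct, encoded here as array lengths.
+ */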
+
+#endif /* CONFIG_AXS103 */
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
new file mode 100644
index 0000000000..a2d10c29fb
--- /dev/null
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+#
+
+menuconfig ARC_SOC_HSDK
+ bool "ARC HS Development Kit SOC"
+ depends on ISA_ARCV2
+ select ARC_HAS_ACCL_REGS
+ select ARC_IRQ_NO_AUTOSAVE
+ select ARC_FPU_SAVE_RESTORE
+ select CLK_HSDK
+ select RESET_CONTROLLER
+ select RESET_HSDK
+ select HAVE_PCI
diff --git a/arch/arc/plat-hsdk/Makefile b/arch/arc/plat-hsdk/Makefile
new file mode 100644
index 0000000000..bb2921e824
--- /dev/null
+++ b/arch/arc/plat-hsdk/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-y := platform.o
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
new file mode 100644
index 0000000000..c4a875b223
--- /dev/null
+++ b/arch/arc/plat-hsdk/platform.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC HSDK Platform support code
+ *
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+#include <linux/smp.h>
+#include <asm/arcregs.h>
+#include <asm/io.h>
+#include <asm/mach_desc.h>
+
+int arc_hsdk_axi_dmac_coherent __section(".data") = 0;
+
+#define ARC_CCM_UNUSED_ADDR 0x60000000
+
+#define ARC_PERIPHERAL_BASE 0xf0000000
+#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000)
+
+#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
+#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
+#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
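+/* presumably the divider field sits in UHS_REG_EXT bits [31:30]: 2 => /2 */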
+
+#define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000)
+
+static void __init hsdk_enable_gpio_intc_wire(void)
+{
+ /*
+ * Peripherals on CPU Card are wired to cpu intc via intermediate
+ * DW APB GPIO blocks (mainly for debouncing)
+ *
+ * ---------------------
+ * | snps,archs-intc |
+ * ---------------------
+ * |
+ * ----------------------
+ * | snps,archs-idu-intc |
+ * ----------------------
+ * | | | | |
+ * | [eth] [USB] [... other peripherals]
+ * |
+ * -------------------
+ * | snps,dw-apb-intc |
+ * -------------------
+ * | | | |
+ * [Bt] [HAPS] [... other peripherals]
+ *
+	 * The current implementation of the "irq-dw-apb-ictl" driver doesn't
+	 * work well with stacked INTCs. In particular, problems arise if its
+	 * master INTC is not yet instantiated. See discussion here -
+	 * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
+	 *
+	 * So set up the first GPIO block as a passive pass-through and hide
+	 * it from the DT hardware topology - connect the intc directly to the
+	 * cpu intc. The GPIO "wire" nevertheless needs to be initialized
+	 * (here).
+	 *
+	 * A side benefit is that peripheral interrupt handling avoids one
+	 * nested intc ISR hop.
+ *
+	 * According to the HSDK User's Manual [1], "Table 2 Interrupt Mapping",
+	 * we have the following GPIO input lines used as interrupt sources:
+ * - GPIO[0] - Bluetooth interrupt of RS9113 module
+ * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
+ * - GPIO[3] - Audio codec (MAX9880A) interrupt
+ * - GPIO[8-23] - Available on Arduino and PMOD_x headers
+	 * For now the Arduino and PMOD_x headers are unused in the Linux
+	 * use-case, so we only enable lines 0, 2 and 3.
+ *
+ * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
+ */
+#define GPIO_INTEN (HSDK_GPIO_INTC + 0x30)
+#define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c)
+#define GPIO_INT_CONNECTED_MASK 0x0d
+
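+	/*
+	 * Mask all lines first, then unmask the connected ones (0, 2 and 3),
+	 * configure them as level-triggered and active-high, and enable them.
+	 */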
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
+ iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
+ iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
+ iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
+}
+
+static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
+{
+ void *fdt = initial_boot_params;
+ const void *prop;
+ int node, ret;
+ bool dt_coh_set;
+
+ node = fdt_path_offset(fdt, path);
+ if (node < 0)
+ goto tweak_fail;
+
+ prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
+ if (!prop && ret != -FDT_ERR_NOTFOUND)
+ goto tweak_fail;
+
+ dt_coh_set = ret != -FDT_ERR_NOTFOUND;
+ ret = 0;
+
+ /* need to remove "dma-coherent" property */
+ if (dt_coh_set && !coherent)
+ ret = fdt_delprop(fdt, node, "dma-coherent");
+
+ /* need to set "dma-coherent" property */
+ if (!dt_coh_set && coherent)
+ ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
+
+ if (ret < 0)
+ goto tweak_fail;
+
+ return 0;
+
+tweak_fail:
+ pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
+ return -EFAULT;
+}
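+
+/*
+ * e.g. hsdk_tweak_node_coherency("/soc/dmac@80000", true) adds an empty
+ * "dma-coherent" property to that node of the in-memory DTB, and the
+ * 'false' variant removes it - keeping the DT consistent with the AXI
+ * bridge programming done below.
+ */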
+
+enum hsdk_axi_masters {
+ M_HS_CORE = 0,
+ M_HS_RTT,
+ M_AXI_TUN,
+ M_HDMI_VIDEO,
+ M_HDMI_AUDIO,
+ M_USB_HOST,
+ M_ETHERNET,
+ M_SDIO,
+ M_GPU,
+ M_DMAC_0,
+ M_DMAC_1,
+ M_DVFS
+};
+
+#define UPDATE_VAL 1
+
+/*
+ * This is a modified configuration of the AXI bridge. Default settings
+ * are specified in "Table 111 CREG Address Decoder register reset values".
+ *
+ * AXI_M_m_SLV{0|1} - Slave Select register for master 'm'.
+ * Possible slaves are:
+ * - 0 => no slave selected
+ * - 1 => DDR controller port #1
+ * - 2 => SRAM controller
+ * - 3 => AXI tunnel
+ * - 4 => EBI controller
+ * - 5 => ROM controller
+ * - 6 => AXI2APB bridge
+ * - 7 => DDR controller port #2
+ * - 8 => DDR controller port #3
+ * - 9 => HS38x4 IOC
+ * - 10 => HS38x4 DMI
+ * AXI_M_m_OFFSET{0|1} - Addr Offset register for master 'm'
+ *
+ * Please read the ARC HS Development IC Specification, section 17.2, for
+ * more information about aperture configuration.
+ *
+ * m master AXI_M_m_SLV0 AXI_M_m_SLV1 AXI_M_m_OFFSET0 AXI_M_m_OFFSET1
+ * 0 HS (CBU) 0x11111111 0x63111111 0xFEDCBA98 0x0E543210
+ * 1 HS (RTT) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 2 AXI Tunnel 0x88888888 0x88888888 0xFEDCBA98 0x76543210
+ * 3 HDMI-VIDEO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 4 HDMI-AUDIO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 5 USB-HOST 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
+ * 6 ETHERNET 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
+ * 7 SDIO 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
+ * 8 GPU 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 9 DMAC (port #1) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 10 DMAC (port #2) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
+ * 11 DVFS 0x00000000 0x60000000 0x00000000 0x00000000
+ */
+
+#define CREG_AXI_M_SLV0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m)))
+#define CREG_AXI_M_SLV1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x04))
+#define CREG_AXI_M_OFT0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x08))
+#define CREG_AXI_M_OFT1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x0C))
+#define CREG_AXI_M_UPDT(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x14))
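+
+/*
+ * e.g. for the SDIO master (m == M_SDIO == 7) the register bank starts at
+ * CREG_BASE + 0xE0, so CREG_AXI_M_SLV0(M_SDIO) resolves to 0xF00010E0.
+ */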
+
+#define CREG_AXI_M_HS_CORE_BOOT ((void __iomem *)(CREG_BASE + 0x010))
+
+#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
+#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
+
+static void __init hsdk_init_memory_bridge_axi_dmac(void)
+{
+ bool coherent = !!arc_hsdk_axi_dmac_coherent;
+ u32 axi_m_slv1, axi_m_oft1;
+
+ /*
+	 * Don't tweak the memory bridge configuration if we failed to tweak
+	 * the DTB, as we would end up in an inconsistent state.
+ */
+ if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
+ return;
+
+ if (coherent) {
+ axi_m_slv1 = 0x77999999;
+ axi_m_oft1 = 0x76DCBA98;
+ } else {
+ axi_m_slv1 = 0x77777777;
+ axi_m_oft1 = 0x76543210;
+ }
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
+ writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
+ writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
+ writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
+ writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
+}
+
+static void __init hsdk_init_memory_bridge(void)
+{
+ u32 reg;
+
+ /*
+ * M_HS_CORE has one unique register - BOOT.
+	 * We need to clear the boot mirror bits (BOOT[1:0]) in it to prevent
+	 * the first aperture from being masked by the 'boot mirror'.
+ */
+ reg = readl(CREG_AXI_M_HS_CORE_BOOT) & (~0x3);
+ writel(reg, CREG_AXI_M_HS_CORE_BOOT);
+ writel(0x11111111, CREG_AXI_M_SLV0(M_HS_CORE));
+ writel(0x63111111, CREG_AXI_M_SLV1(M_HS_CORE));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_CORE));
+ writel(0x0E543210, CREG_AXI_M_OFT1(M_HS_CORE));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_CORE));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_HS_RTT));
+ writel(0x77777777, CREG_AXI_M_SLV1(M_HS_RTT));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_RTT));
+ writel(0x76543210, CREG_AXI_M_OFT1(M_HS_RTT));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_RTT));
+
+ writel(0x88888888, CREG_AXI_M_SLV0(M_AXI_TUN));
+ writel(0x88888888, CREG_AXI_M_SLV1(M_AXI_TUN));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_AXI_TUN));
+ writel(0x76543210, CREG_AXI_M_OFT1(M_AXI_TUN));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_AXI_TUN));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_VIDEO));
+ writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_VIDEO));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_VIDEO));
+ writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_VIDEO));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_VIDEO));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_AUDIO));
+ writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_AUDIO));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_AUDIO));
+ writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_AUDIO));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_AUDIO));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_USB_HOST));
+ writel(0x77999999, CREG_AXI_M_SLV1(M_USB_HOST));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_USB_HOST));
+ writel(0x76DCBA98, CREG_AXI_M_OFT1(M_USB_HOST));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_USB_HOST));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_ETHERNET));
+ writel(0x77999999, CREG_AXI_M_SLV1(M_ETHERNET));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_ETHERNET));
+ writel(0x76DCBA98, CREG_AXI_M_OFT1(M_ETHERNET));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_ETHERNET));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_SDIO));
+ writel(0x77999999, CREG_AXI_M_SLV1(M_SDIO));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_SDIO));
+ writel(0x76DCBA98, CREG_AXI_M_OFT1(M_SDIO));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_SDIO));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_GPU));
+ writel(0x77777777, CREG_AXI_M_SLV1(M_GPU));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_GPU));
+ writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
+
+ writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
+ writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
+ writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
+ writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
+
+ hsdk_init_memory_bridge_axi_dmac();
+
+ /*
+ * PAE remapping for DMA clients does not work due to an RTL bug, so
+ * CREG_PAE register must be programmed to all zeroes, otherwise it
+ * will cause problems with DMA to/from peripherals even if PAE40 is
+ * not used.
+ */
+ writel(0x00000000, CREG_PAE);
+ writel(UPDATE_VAL, CREG_PAE_UPDT);
+}
+
+static void __init hsdk_init_early(void)
+{
+ hsdk_init_memory_bridge();
+
+ /*
+ * Switch SDIO external ciu clock divider from default div-by-8 to
+ * minimum possible div-by-2.
+ */
+ iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+ hsdk_enable_gpio_intc_wire();
+}
+
+static const char *hsdk_compat[] __initconst = {
+ "snps,hsdk",
+ NULL,
+};
+
+MACHINE_START(SIMULATION, "hsdk")
+ .dt_compat = hsdk_compat,
+ .init_early = hsdk_init_early,
+MACHINE_END
diff --git a/arch/arc/plat-sim/Makefile b/arch/arc/plat-sim/Makefile
new file mode 100644
index 0000000000..ea9389bf8b
--- /dev/null
+++ b/arch/arc/plat-sim/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+
+obj-y := platform.o
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
new file mode 100644
index 0000000000..2bde2a6e33
--- /dev/null
+++ b/arch/arc/plat-sim/platform.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC simulation Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/init.h>
+#include <asm/mach_desc.h>
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * A machine description is simply a set of platform/board-specific
+ * callbacks. This is not directly related to DeviceTree-based dynamic
+ * device creation; however, as part of the early device tree scan, we
+ * also select the right callback set by matching the DT compatible name.
+ */
+
+static const char *simulation_compat[] __initconst = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ "snps,nsim",
+ "snps,nsimosci",
+#else
+ "snps,nsimosci_hs",
+ "snps,zebu_hs",
+#endif
+ NULL,
+};
+
+MACHINE_START(SIMULATION, "simulation")
+ .dt_compat = simulation_compat,
+MACHINE_END
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
new file mode 100644
index 0000000000..158d53aad4
--- /dev/null
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Abilis Systems TB10x platform kernel configuration file
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+
+menuconfig ARC_PLAT_TB10X
+ bool "Abilis TB10x"
+ select PINCTRL
+ select PINCTRL_TB10X
+ select PINMUX
+ select GPIOLIB
+ select GPIO_TB10X
+ select TB10X_IRQC
+ help
+ Support for platforms based on the TB10x home media gateway SOC by
+ Abilis Systems. TB10x is based on the ARC700 CPU architecture.
+ Say Y if you are building a kernel for one of the SOCs in this
+ series (e.g. TB100 or TB101). If in doubt say N.
diff --git a/arch/arc/plat-tb10x/Makefile b/arch/arc/plat-tb10x/Makefile
new file mode 100644
index 0000000000..8e97d454e7
--- /dev/null
+++ b/arch/arc/plat-tb10x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Abilis Systems TB10x platform Makefile
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+
+KBUILD_CFLAGS += -Iarch/arc/plat-tb10x/include
+
+obj-y += tb10x.o
diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c
new file mode 100644
index 0000000000..11d23420f9
--- /dev/null
+++ b/arch/arc/plat-tb10x/tb10x.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abilis Systems TB10x platform initialisation
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ */
+
+#include <linux/init.h>
+#include <asm/mach_desc.h>
+
+static const char *tb10x_compat[] __initdata = {
+ "abilis,arc-tb10x",
+ NULL,
+};
+
+MACHINE_START(TB10x, "tb10x")
+ .dt_compat = tb10x_compat,
+MACHINE_END